In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''1. Problem Statement – Real estate agents want help to predict the house price for regions in the USA.
He gave you the dataset to work on and you decided to use the Linear Regression Model. Create a
model that will help him to estimate what the house would sell for.
URL for a dataset:
https://github.com/huzaifsayed/Linear-Regression-Model-for-House-PricePrediction/blob/master/USA_Housing.csv
In [ ]:
 
In [7]:
# Step 1: Importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score



data = pd.read_csv('usa_housing.csv')


# Step 3: Handling non-numeric columns
# Drop any non-numeric columns
data = data.select_dtypes(include=[np.number])  # This keeps only numeric columns

# Step 4: Exploratory Data Analysis (EDA)
print(data.head())  # Display first few rows of the dataset
print(data.info())  # Information about data types and missing values
print(data.describe())  # Statistical summary

# Visualizing correlation between features and price
plt.figure(figsize=(10, 8))
sns.heatmap(data.corr(), annot=True, cmap='coolwarm', fmt='.2f')
plt.show()

# Step 5: Preprocessing the data
# Assuming 'Price' is the target variable and others are features
X = data.drop('Price', axis=1)  # Features
y = data['Price']  # Target

# Step 6: Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Step 7: Create the Linear Regression model
model = LinearRegression()

# Step 8: Train the model
model.fit(X_train, y_train)

# Step 9: Predict on test data
y_pred = model.predict(X_test)

# Step 10: Model evaluation
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)

print(f"Mean Squared Error (MSE): {mse}")
print(f"R-squared (R2) value: {r2}")

# Step 11: Visualizing the predicted vs actual prices
plt.figure(figsize=(8, 6))
plt.scatter(y_test, y_pred)
plt.xlabel("Actual Prices")
plt.ylabel("Predicted Prices")
plt.title("Actual vs Predicted Prices")
plt.show()
   Avg. Area Income  Avg. Area House Age  Avg. Area Number of Rooms  \
0      79545.458574             5.682861                   7.009188   
1      79248.642455             6.002900                   6.730821   
2      61287.067179             5.865890                   8.512727   
3      63345.240046             7.188236                   5.586729   
4      59982.197226             5.040555                   7.839388   

   Avg. Area Number of Bedrooms  Area Population         Price  
0                          4.09     23086.800503  1.059034e+06  
1                          3.09     40173.072174  1.505891e+06  
2                          5.13     36882.159400  1.058988e+06  
3                          3.26     34310.242831  1.260617e+06  
4                          4.23     26354.109472  6.309435e+05  
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5000 entries, 0 to 4999
Data columns (total 6 columns):
 #   Column                        Non-Null Count  Dtype  
---  ------                        --------------  -----  
 0   Avg. Area Income              5000 non-null   float64
 1   Avg. Area House Age           5000 non-null   float64
 2   Avg. Area Number of Rooms     5000 non-null   float64
 3   Avg. Area Number of Bedrooms  5000 non-null   float64
 4   Area Population               5000 non-null   float64
 5   Price                         5000 non-null   float64
dtypes: float64(6)
memory usage: 234.5 KB
None
       Avg. Area Income  Avg. Area House Age  Avg. Area Number of Rooms  \
count       5000.000000          5000.000000                5000.000000   
mean       68583.108984             5.977222                   6.987792   
std        10657.991214             0.991456                   1.005833   
min        17796.631190             2.644304                   3.236194   
25%        61480.562388             5.322283                   6.299250   
50%        68804.286404             5.970429                   7.002902   
75%        75783.338666             6.650808                   7.665871   
max       107701.748378             9.519088                  10.759588   

       Avg. Area Number of Bedrooms  Area Population         Price  
count                   5000.000000      5000.000000  5.000000e+03  
mean                       3.981330     36163.516039  1.232073e+06  
std                        1.234137      9925.650114  3.531176e+05  
min                        2.000000       172.610686  1.593866e+04  
25%                        3.140000     29403.928702  9.975771e+05  
50%                        4.050000     36199.406689  1.232669e+06  
75%                        4.490000     42861.290769  1.471210e+06  
max                        6.500000     69621.713378  2.469066e+06  
No description has been provided for this image
Mean Squared Error (MSE): 10089009300.894522
R-squared (R2) value: 0.9179971706834288
No description has been provided for this image
In [ ]:
 
In [ ]:
#OR 
In [ ]:
 
In [9]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

%matplotlib inline


HouseDF = pd.read_csv('usa_housing.csv')


HouseDF.head()

HouseDF.info()

HouseDF.describe()

HouseDF.columns

HouseDF = HouseDF.select_dtypes(include=[np.number])  # Only keep numeric columns

# Verify the columns again
print(HouseDF.columns)



#Exploratory Data Analysis for House Price Prediction


sns.pairplot(HouseDF)

sns.distplot(HouseDF['Price'])

sns.heatmap(HouseDF.corr(), annot=True)


#Split Data into Train, Test

X = HouseDF[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
               'Avg. Area Number of Bedrooms', 'Area Population']]

y = HouseDF['Price']


#Creating and Training the LinearRegression Model


from sklearn.model_selection import train_test_split


X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)


from sklearn.linear_model import LinearRegression


lm = LinearRegression()

lm.fit(X_train,y_train)


#LinearRegression Model Evaluation

print(lm.intercept_)

coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])
coeff_df

#Predictions from our Linear Regression Model


predictions = lm.predict(X_test)


plt.scatter(y_test,predictions)



sns.distplot((y_test-predictions),bins=50);


#Regression Evaluation Metrics

from sklearn import metrics

print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5000 entries, 0 to 4999
Data columns (total 7 columns):
 #   Column                        Non-Null Count  Dtype  
---  ------                        --------------  -----  
 0   Avg. Area Income              5000 non-null   float64
 1   Avg. Area House Age           5000 non-null   float64
 2   Avg. Area Number of Rooms     5000 non-null   float64
 3   Avg. Area Number of Bedrooms  5000 non-null   float64
 4   Area Population               5000 non-null   float64
 5   Price                         5000 non-null   float64
 6   Address                       5000 non-null   object 
dtypes: float64(6), object(1)
memory usage: 273.6+ KB
Index(['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
       'Avg. Area Number of Bedrooms', 'Area Population', 'Price'],
      dtype='object')
C:\Users\Lenovo\AppData\Local\Temp\ipykernel_24060\1964582810.py:32: UserWarning: 

`distplot` is a deprecated function and will be removed in seaborn v0.14.0.

Please adapt your code to use either `displot` (a figure-level function with
similar flexibility) or `histplot` (an axes-level function for histograms).

For a guide to updating your code to use the new functions, please see
https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751

  sns.distplot(HouseDF['Price'])
-2640159.7968526953
MAE: 82288.22251914942
MSE: 10460958907.20898
RMSE: 102278.82922290899
C:\Users\Lenovo\AppData\Local\Temp\ipykernel_24060\1964582810.py:79: UserWarning: 

`distplot` is a deprecated function and will be removed in seaborn v0.14.0.

Please adapt your code to use either `displot` (a figure-level function with
similar flexibility) or `histplot` (an axes-level function for histograms).

For a guide to updating your code to use the new functions, please see
https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751

  sns.distplot((y_test-predictions),bins=50);
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''2. Build a Multiclass classifier using the CNN model. Use MNIST or any other suitable dataset. a.
Perform Data Pre-processing b. Define Model and perform training c. Evaluate Results using confusion
matrix
In [ ]:
 
In [11]:
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns



#Data Preprocessing

# Load MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Reshape the data to fit the CNN model (28x28 images, 1 color channel)
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32')  # 60000 samples
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32')  # 10000 samples

# Normalize the pixel values to the range [0, 1]
X_train /= 255.0
X_test /= 255.0

# One-hot encode the labels
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)



# Define the CNN model
model = Sequential()

# Add a Convolutional Layer with 32 filters, 3x3 kernel, and ReLU activation
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))

# Add MaxPooling Layer to down-sample the feature map
model.add(MaxPooling2D((2, 2)))

# Add a second Convolutional Layer
model.add(Conv2D(64, (3, 3), activation='relu'))

# Add another MaxPooling Layer
model.add(MaxPooling2D((2, 2)))

# Flatten the feature maps to a 1D vector for the fully connected layer
model.add(Flatten())

# Add a Fully Connected (Dense) Layer
model.add(Dense(128, activation='relu'))

# Output Layer with 10 units for 10 classes (digits 0-9) and softmax activation
model.add(Dense(10, activation='softmax'))



# Compile the model
model.compile(optimizer='adam', 
              loss='categorical_crossentropy', 
              metrics=['accuracy'])


# Train the model
history = model.fit(X_train, y_train, epochs=5, batch_size=64, validation_split=0.2)


# Evaluate the model on test data
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f'Test accuracy: {test_acc}')



# Make predictions
y_pred = model.predict(X_test)

# Convert predictions and actual labels back from one-hot encoding to integer labels
y_pred_classes = np.argmax(y_pred, axis=1)
y_test_classes = np.argmax(y_test, axis=1)

# Confusion Matrix
cm = confusion_matrix(y_test_classes, y_pred_classes)

# Visualize the confusion matrix using a heatmap
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=np.arange(10), yticklabels=np.arange(10))
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.show()
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(
Epoch 1/5
750/750 ━━━━━━━━━━━━━━━━━━━━ 12s 14ms/step - accuracy: 0.8743 - loss: 0.4189 - val_accuracy: 0.9812 - val_loss: 0.0665
Epoch 2/5
750/750 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.9820 - loss: 0.0572 - val_accuracy: 0.9817 - val_loss: 0.0581
Epoch 3/5
750/750 ━━━━━━━━━━━━━━━━━━━━ 11s 14ms/step - accuracy: 0.9891 - loss: 0.0366 - val_accuracy: 0.9871 - val_loss: 0.0447
Epoch 4/5
750/750 ━━━━━━━━━━━━━━━━━━━━ 11s 15ms/step - accuracy: 0.9927 - loss: 0.0234 - val_accuracy: 0.9870 - val_loss: 0.0486
Epoch 5/5
750/750 ━━━━━━━━━━━━━━━━━━━━ 11s 15ms/step - accuracy: 0.9940 - loss: 0.0185 - val_accuracy: 0.9888 - val_loss: 0.0408
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.9875 - loss: 0.0349
Test accuracy: 0.9904999732971191
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
#OR
In [12]:
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
%matplotlib inline
from keras import layers


(x_train , y_train) , (x_test , y_test) = keras.datasets.mnist.load_data()


len(x_train)

x_train[0].shape

x_train[0]

plt.matshow(x_train[0])

def plot_input_img(i) :
    plt.imshow(x_train[i],cmap='binary')
    plt.title(i)
    plt.show()


for i in range(10):
  plot_input_img(i)


y_train[:5]


#data normalising
x_train = x_train/255
x_test = x_test/255

x_train


x_train_flattened = x_train.reshape(len(x_train),28*28)


x_train_flattened.shape


x_test_flattened = x_test.reshape(len(x_test),28*28)


x_test_flattened.shape


#modeling 


model = keras.Sequential([
    layers.Dense(10, input_shape=(784,), activation='sigmoid')
])
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)



model.fit(x_train_flattened,y_train,epochs=10)


model.evaluate(x_test_flattened,y_test)


plt.matshow(x_test[3])


y_predict = model.predict(x_test_flattened)
y_predict[3]


y_predict_labels = [np.argmax(i) for i in y_predict]
y_predict_labels[:5]


#Adding Hidden Layer
model = keras.Sequential([
    layers.Dense(100, input_shape=(784,), activation='relu'),
    layers.Dense(10, activation='sigmoid')
])
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)
model.fit(x_train_flattened,y_train,epochs=10)


# before using hidden layer
model.summary()


#summary after using hidden layer
model.summary()


#evaluation 


cm = tf.math.confusion_matrix(labels=y_test, predictions=y_predict_labels)


cm

import seaborn as sn
plt.figure(figsize=(10,7))
sn.heatmap(cm, annot=True, fmt='d')
plt.xlabel('Predicted')
plt.ylabel('Truth')


model.evaluate(x_test_flattened,y_test)



#after making use of hidden layer
import seaborn as sn
plt.figure(figsize=(10,7))
sn.heatmap(cm, annot=True, fmt='d')
plt.xlabel('Predicted')
plt.ylabel('Truth')
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\core\dense.py:85: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 1ms/step - accuracy: 0.8135 - loss: 0.7204
Epoch 2/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 977us/step - accuracy: 0.9103 - loss: 0.3193
Epoch 3/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 997us/step - accuracy: 0.9196 - loss: 0.2876
Epoch 4/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - accuracy: 0.9242 - loss: 0.2776
Epoch 5/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - accuracy: 0.9250 - loss: 0.2654
Epoch 6/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - accuracy: 0.9284 - loss: 0.2618
Epoch 7/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - accuracy: 0.9301 - loss: 0.2536
Epoch 8/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - accuracy: 0.9302 - loss: 0.2496
Epoch 9/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - accuracy: 0.9308 - loss: 0.2495
Epoch 10/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - accuracy: 0.9320 - loss: 0.2485
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 992us/step - accuracy: 0.9171 - loss: 0.3028
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 883us/step
Epoch 1/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8730 - loss: 0.4540
Epoch 2/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9605 - loss: 0.1359
Epoch 3/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9733 - loss: 0.0884
Epoch 4/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9797 - loss: 0.0661
Epoch 5/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9847 - loss: 0.0513
Epoch 6/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9880 - loss: 0.0403
Epoch 7/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9901 - loss: 0.0321
Epoch 8/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9918 - loss: 0.0287
Epoch 9/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9940 - loss: 0.0217
Epoch 10/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 2ms/step - accuracy: 0.9949 - loss: 0.0173
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                         ┃ Output Shape                ┃         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ dense_3 (Dense)                      │ (None, 100)                 │          78,500 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_4 (Dense)                      │ (None, 10)                  │           1,010 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
 Total params: 238,532 (931.77 KB)
 Trainable params: 79,510 (310.59 KB)
 Non-trainable params: 0 (0.00 B)
 Optimizer params: 159,022 (621.18 KB)
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                         ┃ Output Shape                ┃         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ dense_3 (Dense)                      │ (None, 100)                 │          78,500 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_4 (Dense)                      │ (None, 10)                  │           1,010 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
 Total params: 238,532 (931.77 KB)
 Trainable params: 79,510 (310.59 KB)
 Non-trainable params: 0 (0.00 B)
 Optimizer params: 159,022 (621.18 KB)
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9751 - loss: 0.0928
Out[12]:
Text(95.72222222222221, 0.5, 'Truth')
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''3. Design RNN or its variant including LSTM or GRU a) Select a suitable time series dataset.
Example – predict sentiments based on product reviews b) Apply for prediction
In [ ]:
 
In [ ]:
 
In [2]:
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load dataset, keeping only the top 10,000 most frequent words
num_words = 10000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=num_words)

# Pad sequences to ensure equal input length
maxlen = 200  # max length of review
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)


#Build the LSTM Model
model = Sequential()

# Embedding layer converts word indices to dense vectors
model = Sequential()

# Embedding layer without input_length
model.add(Embedding(input_dim=10000, output_dim=128))

# LSTM layer
model.add(LSTM(units=128, dropout=0.2, recurrent_dropout=0.2))

# Output layer
model.add(Dense(1, activation='sigmoid'))

# Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])


#Train the Model
model.fit(X_train, y_train, epochs=5, batch_size=64, validation_split=0.2)



# Evaluate the Model
score, acc = model.evaluate(X_test, y_test)
print(f'Test accuracy: {acc:.2f}')



#Plot Confusion Matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

# Predict test set
y_pred = model.predict(X_test)
y_pred_classes = (y_pred > 0.5).astype("int32")

# Confusion matrix
cm = confusion_matrix(y_test, y_pred_classes)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.show()
Epoch 1/5
313/313 ━━━━━━━━━━━━━━━━━━━━ 77s 232ms/step - accuracy: 0.6950 - loss: 0.5704 - val_accuracy: 0.7828 - val_loss: 0.4601
Epoch 2/5
313/313 ━━━━━━━━━━━━━━━━━━━━ 71s 226ms/step - accuracy: 0.8554 - loss: 0.3543 - val_accuracy: 0.8198 - val_loss: 0.4052
Epoch 3/5
313/313 ━━━━━━━━━━━━━━━━━━━━ 72s 229ms/step - accuracy: 0.8771 - loss: 0.2999 - val_accuracy: 0.7444 - val_loss: 0.5007
Epoch 4/5
313/313 ━━━━━━━━━━━━━━━━━━━━ 71s 226ms/step - accuracy: 0.9040 - loss: 0.2516 - val_accuracy: 0.5966 - val_loss: 0.7077
Epoch 5/5
313/313 ━━━━━━━━━━━━━━━━━━━━ 72s 231ms/step - accuracy: 0.7943 - loss: 0.4156 - val_accuracy: 0.8340 - val_loss: 0.3947
782/782 ━━━━━━━━━━━━━━━━━━━━ 31s 39ms/step - accuracy: 0.8321 - loss: 0.3989
Test accuracy: 0.83
782/782 ━━━━━━━━━━━━━━━━━━━━ 31s 39ms/step
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
#OR 
In [ ]:
 
In [2]:
import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns


# Load dataset with only the top 10,000 most common words
vocab_size = 10000
max_len = 200  # pad or truncate reviews to this length

(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocab_size)

# Pad sequences
X_train = pad_sequences(X_train, maxlen=max_len)
X_test = pad_sequences(X_test, maxlen=max_len)


model = Sequential([
    Embedding(input_dim=vocab_size, output_dim=64),
    LSTM(128),
    Dense(1, activation='sigmoid')  # Binary classification (positive/negative)
])

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train the model
model.fit(X_train, y_train, epochs=5, batch_size=128, validation_split=0.2)



# Evaluate
loss, accuracy = model.evaluate(X_test, y_test)
print(f"Test Accuracy: {accuracy*100:.2f}%")

# Predict
y_pred = (model.predict(X_test) > 0.5).astype("int32")

# Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(6,5))
sns.heatmap(cm, annot=True, fmt='d', cmap='Greens')
plt.title("Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()

# Classification Report
print(classification_report(y_test, y_pred, target_names=["Negative", "Positive"]))
Epoch 1/5
157/157 ━━━━━━━━━━━━━━━━━━━━ 53s 309ms/step - accuracy: 0.6504 - loss: 0.6498 - val_accuracy: 0.8248 - val_loss: 0.3896
Epoch 2/5
157/157 ━━━━━━━━━━━━━━━━━━━━ 49s 313ms/step - accuracy: 0.8707 - loss: 0.3172 - val_accuracy: 0.8588 - val_loss: 0.3313
Epoch 3/5
157/157 ━━━━━━━━━━━━━━━━━━━━ 47s 300ms/step - accuracy: 0.9238 - loss: 0.2126 - val_accuracy: 0.8768 - val_loss: 0.3278
Epoch 4/5
157/157 ━━━━━━━━━━━━━━━━━━━━ 48s 303ms/step - accuracy: 0.9397 - loss: 0.1650 - val_accuracy: 0.8592 - val_loss: 0.3640
Epoch 5/5
157/157 ━━━━━━━━━━━━━━━━━━━━ 52s 323ms/step - accuracy: 0.9417 - loss: 0.1581 - val_accuracy: 0.8506 - val_loss: 0.3623
782/782 ━━━━━━━━━━━━━━━━━━━━ 33s 42ms/step - accuracy: 0.8526 - loss: 0.3677
Test Accuracy: 85.38%
782/782 ━━━━━━━━━━━━━━━━━━━━ 34s 43ms/step
No description has been provided for this image
              precision    recall  f1-score   support

    Negative       0.85      0.86      0.85     12500
    Positive       0.86      0.85      0.85     12500

    accuracy                           0.85     25000
   macro avg       0.85      0.85      0.85     25000
weighted avg       0.85      0.85      0.85     25000

In [11]:
pip install --upgrade tensorflow keras 
^C
  WARNING: Failed to remove contents in a temporary directory 'C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\~l_dtypes'.
  You can safely remove it manually.
Requirement already satisfied: tensorflow in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (2.16.0rc0)
Collecting tensorflow
  Downloading tensorflow-2.19.0-cp312-cp312-win_amd64.whl.metadata (4.1 kB)
Requirement already satisfied: keras in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (3.0.5)
Collecting keras
  Downloading keras-3.9.2-py3-none-any.whl.metadata (6.1 kB)
Requirement already satisfied: absl-py>=1.0.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (2.1.0)
Requirement already satisfied: astunparse>=1.6.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (1.6.3)
Collecting flatbuffers>=24.3.25 (from tensorflow)
  Downloading flatbuffers-25.2.10-py2.py3-none-any.whl.metadata (875 bytes)
Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (0.5.4)
Requirement already satisfied: google-pasta>=0.1.1 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (0.2.0)
Requirement already satisfied: libclang>=13.0.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (16.0.6)
Requirement already satisfied: opt-einsum>=2.3.2 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (3.3.0)
Requirement already satisfied: packaging in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (23.2)
Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev,>=3.20.3 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (4.21.12)
Requirement already satisfied: requests<3,>=2.21.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (2.31.0)
Requirement already satisfied: setuptools in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (75.3.0)
Requirement already satisfied: six>=1.12.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (1.16.0)
Requirement already satisfied: termcolor>=1.1.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (2.4.0)
Requirement already satisfied: typing-extensions>=3.6.6 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (4.10.0)
Requirement already satisfied: wrapt>=1.11.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (1.16.0)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (1.62.0)
Collecting tensorboard~=2.19.0 (from tensorflow)
  Downloading tensorboard-2.19.0-py3-none-any.whl.metadata (1.8 kB)
Requirement already satisfied: numpy<2.2.0,>=1.26.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow) (1.26.3)
Collecting h5py>=3.11.0 (from tensorflow)
  Downloading h5py-3.13.0-cp312-cp312-win_amd64.whl.metadata (2.5 kB)
Collecting ml-dtypes<1.0.0,>=0.5.1 (from tensorflow)
  Downloading ml_dtypes-0.5.1-cp312-cp312-win_amd64.whl.metadata (22 kB)
Requirement already satisfied: charset-normalizer<4,>=2 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (3.6)
Requirement already satisfied: urllib3<3,>=1.21.1 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (2.2.1)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (2024.2.2)
Requirement already satisfied: markdown>=2.6.8 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorboard~=2.19.0->tensorflow) (3.5.2)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorboard~=2.19.0->tensorflow) (0.7.2)
Requirement already satisfied: werkzeug>=1.0.1 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorboard~=2.19.0->tensorflow) (3.0.1)
Requirement already satisfied: rich in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from keras) (13.7.1)
Requirement already satisfied: namex in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from keras) (0.0.7)
Collecting optree (from keras)
  Downloading optree-0.15.0-cp312-cp312-win_amd64.whl.metadata (49 kB)
Requirement already satisfied: wheel<1.0,>=0.23.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from astunparse>=1.6.0->tensorflow) (0.44.0)
Requirement already satisfied: MarkupSafe>=2.1.1 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from werkzeug>=1.0.1->tensorboard~=2.19.0->tensorflow) (2.1.5)
Requirement already satisfied: markdown-it-py>=2.2.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from rich->keras) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from rich->keras) (2.17.2)
Requirement already satisfied: mdurl~=0.1 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from markdown-it-py>=2.2.0->rich->keras) (0.1.2)
Downloading tensorflow-2.19.0-cp312-cp312-win_amd64.whl (376.0 MB)
   ---------------------------------------- 0.0/376.0 MB ? eta -:--:--
   ---------------------------------------- 3.7/376.0 MB 21.8 MB/s eta 0:00:18
    --------------------------------------- 4.7/376.0 MB 13.0 MB/s eta 0:00:29
    --------------------------------------- 5.8/376.0 MB 10.1 MB/s eta 0:00:37
    --------------------------------------- 6.8/376.0 MB 8.2 MB/s eta 0:00:45
    --------------------------------------- 7.1/376.0 MB 7.6 MB/s eta 0:00:49
    --------------------------------------- 7.6/376.0 MB 6.3 MB/s eta 0:00:59
    --------------------------------------- 8.4/376.0 MB 5.7 MB/s eta 0:01:05
   - -------------------------------------- 9.4/376.0 MB 5.6 MB/s eta 0:01:06
   - -------------------------------------- 10.5/376.0 MB 5.6 MB/s eta 0:01:05
   - -------------------------------------- 12.1/376.0 MB 5.8 MB/s eta 0:01:03
   - -------------------------------------- 13.6/376.0 MB 6.0 MB/s eta 0:01:01
   - -------------------------------------- 14.4/376.0 MB 5.7 MB/s eta 0:01:04
   - -------------------------------------- 14.9/376.0 MB 5.5 MB/s eta 0:01:07
   - -------------------------------------- 15.2/376.0 MB 5.3 MB/s eta 0:01:08
   - -------------------------------------- 16.0/376.0 MB 5.1 MB/s eta 0:01:11
   - -------------------------------------- 16.3/376.0 MB 5.0 MB/s eta 0:01:12
   - -------------------------------------- 17.0/376.0 MB 4.8 MB/s eta 0:01:15
   - -------------------------------------- 18.1/376.0 MB 4.8 MB/s eta 0:01:15
   -- ------------------------------------- 19.4/376.0 MB 4.9 MB/s eta 0:01:14
   -- ------------------------------------- 21.0/376.0 MB 5.0 MB/s eta 0:01:11
   -- ------------------------------------- 22.5/376.0 MB 5.1 MB/s eta 0:01:09
   -- ------------------------------------- 23.6/376.0 MB 5.2 MB/s eta 0:01:09
   -- ------------------------------------- 24.1/376.0 MB 5.1 MB/s eta 0:01:10
   -- ------------------------------------- 24.4/376.0 MB 5.0 MB/s eta 0:01:11
   -- ------------------------------------- 25.2/376.0 MB 4.9 MB/s eta 0:01:12
   -- ------------------------------------- 26.2/376.0 MB 4.9 MB/s eta 0:01:12
   -- ------------------------------------- 27.5/376.0 MB 4.9 MB/s eta 0:01:11
   --- ------------------------------------ 29.1/376.0 MB 5.0 MB/s eta 0:01:10
   --- ------------------------------------ 30.7/376.0 MB 5.1 MB/s eta 0:01:08
   --- ------------------------------------ 31.5/376.0 MB 5.0 MB/s eta 0:01:09
   --- ------------------------------------ 32.0/376.0 MB 5.0 MB/s eta 0:01:10
   --- ------------------------------------ 33.0/376.0 MB 5.0 MB/s eta 0:01:10
   --- ------------------------------------ 34.3/376.0 MB 5.0 MB/s eta 0:01:09
   --- ------------------------------------ 35.7/376.0 MB 5.0 MB/s eta 0:01:08
   --- ------------------------------------ 36.4/376.0 MB 5.0 MB/s eta 0:01:09
   --- ------------------------------------ 37.2/376.0 MB 4.9 MB/s eta 0:01:09
   ---- ----------------------------------- 38.3/376.0 MB 5.0 MB/s eta 0:01:09
   ---- ----------------------------------- 39.6/376.0 MB 5.0 MB/s eta 0:01:08
   ---- ----------------------------------- 41.2/376.0 MB 5.1 MB/s eta 0:01:07
   ---- ----------------------------------- 41.9/376.0 MB 5.0 MB/s eta 0:01:07
   ---- ----------------------------------- 42.5/376.0 MB 5.0 MB/s eta 0:01:07
   ---- ----------------------------------- 43.5/376.0 MB 5.0 MB/s eta 0:01:08
   ---- ----------------------------------- 44.6/376.0 MB 5.0 MB/s eta 0:01:07
   ---- ----------------------------------- 45.9/376.0 MB 5.0 MB/s eta 0:01:06
   ---- ----------------------------------- 46.9/376.0 MB 5.0 MB/s eta 0:01:06
   ----- ---------------------------------- 47.7/376.0 MB 5.0 MB/s eta 0:01:06
   ----- ---------------------------------- 48.8/376.0 MB 5.0 MB/s eta 0:01:06
   ----- ---------------------------------- 49.5/376.0 MB 5.0 MB/s eta 0:01:06
   ----- ---------------------------------- 50.3/376.0 MB 5.0 MB/s eta 0:01:06
   ----- ---------------------------------- 51.6/376.0 MB 5.0 MB/s eta 0:01:06
   ----- ---------------------------------- 53.2/376.0 MB 5.0 MB/s eta 0:01:05
   ----- ---------------------------------- 54.5/376.0 MB 5.0 MB/s eta 0:01:04
   ----- ---------------------------------- 55.3/376.0 MB 5.0 MB/s eta 0:01:04
   ----- ---------------------------------- 55.8/376.0 MB 5.0 MB/s eta 0:01:05
   ----- ---------------------------------- 56.4/376.0 MB 4.9 MB/s eta 0:01:05
   ------ --------------------------------- 57.4/376.0 MB 4.9 MB/s eta 0:01:05
   ------ --------------------------------- 58.2/376.0 MB 4.9 MB/s eta 0:01:05
   ------ --------------------------------- 59.5/376.0 MB 4.9 MB/s eta 0:01:05
   ------ --------------------------------- 61.3/376.0 MB 5.0 MB/s eta 0:01:04
   ------ --------------------------------- 62.9/376.0 MB 5.0 MB/s eta 0:01:03
   ------ --------------------------------- 63.4/376.0 MB 5.0 MB/s eta 0:01:03
   ------ --------------------------------- 64.2/376.0 MB 5.0 MB/s eta 0:01:03
   ------ --------------------------------- 65.3/376.0 MB 5.0 MB/s eta 0:01:03
   ------- -------------------------------- 66.3/376.0 MB 5.0 MB/s eta 0:01:03
   ------- -------------------------------- 67.4/376.0 MB 5.0 MB/s eta 0:01:02
   ------- -------------------------------- 68.7/376.0 MB 5.0 MB/s eta 0:01:02
   ------- -------------------------------- 70.0/376.0 MB 5.0 MB/s eta 0:01:02
   ------- -------------------------------- 70.8/376.0 MB 5.0 MB/s eta 0:01:01
   ------- -------------------------------- 71.6/376.0 MB 5.0 MB/s eta 0:01:02
   ------- -------------------------------- 72.4/376.0 MB 5.0 MB/s eta 0:01:02
   ------- -------------------------------- 73.4/376.0 MB 5.0 MB/s eta 0:01:01
   ------- -------------------------------- 74.4/376.0 MB 5.0 MB/s eta 0:01:01
   -------- ------------------------------- 75.5/376.0 MB 5.0 MB/s eta 0:01:01
   -------- ------------------------------- 76.8/376.0 MB 5.0 MB/s eta 0:01:01
   -------- ------------------------------- 77.6/376.0 MB 5.0 MB/s eta 0:01:00
   -------- ------------------------------- 78.1/376.0 MB 5.0 MB/s eta 0:01:01
   -------- ------------------------------- 78.6/376.0 MB 4.9 MB/s eta 0:01:01
   -------- ------------------------------- 79.4/376.0 MB 4.9 MB/s eta 0:01:01
   -------- ------------------------------- 79.7/376.0 MB 4.9 MB/s eta 0:01:01
   -------- ------------------------------- 80.5/376.0 MB 4.8 MB/s eta 0:01:02
   -------- ------------------------------- 81.5/376.0 MB 4.8 MB/s eta 0:01:01
   -------- ------------------------------- 82.8/376.0 MB 4.8 MB/s eta 0:01:01
   -------- ------------------------------- 84.4/376.0 MB 4.9 MB/s eta 0:01:00
   --------- ------------------------------ 86.2/376.0 MB 4.9 MB/s eta 0:00:59
   --------- ------------------------------ 87.6/376.0 MB 4.9 MB/s eta 0:00:59
   --------- ------------------------------ 88.3/376.0 MB 4.9 MB/s eta 0:00:59
   --------- ------------------------------ 89.1/376.0 MB 4.9 MB/s eta 0:00:59
   --------- ------------------------------ 89.9/376.0 MB 4.9 MB/s eta 0:00:59
   --------- ------------------------------ 90.7/376.0 MB 4.9 MB/s eta 0:00:59
   --------- ------------------------------ 91.8/376.0 MB 4.9 MB/s eta 0:00:59
   --------- ------------------------------ 93.1/376.0 MB 4.9 MB/s eta 0:00:58
   --------- ------------------------------ 93.8/376.0 MB 4.9 MB/s eta 0:00:58
   ---------- ----------------------------- 94.9/376.0 MB 4.9 MB/s eta 0:00:58
   ---------- ----------------------------- 95.9/376.0 MB 4.9 MB/s eta 0:00:58
   ---------- ----------------------------- 97.0/376.0 MB 4.9 MB/s eta 0:00:57
   ---------- ----------------------------- 98.0/376.0 MB 4.9 MB/s eta 0:00:57
   ---------- ----------------------------- 99.1/376.0 MB 4.9 MB/s eta 0:00:57
   ---------- ----------------------------- 99.9/376.0 MB 4.9 MB/s eta 0:00:57
   ---------- ----------------------------- 100.9/376.0 MB 4.9 MB/s eta 0:00:57
   ---------- ----------------------------- 102.2/376.0 MB 4.9 MB/s eta 0:00:56
   ---------- ----------------------------- 103.3/376.0 MB 4.9 MB/s eta 0:00:56
   ----------- ---------------------------- 103.8/376.0 MB 4.9 MB/s eta 0:00:56
   ----------- ---------------------------- 104.6/376.0 MB 4.9 MB/s eta 0:00:56
   ----------- ---------------------------- 105.9/376.0 MB 4.9 MB/s eta 0:00:56
   ----------- ---------------------------- 107.0/376.0 MB 4.9 MB/s eta 0:00:55
   ----------- ---------------------------- 108.5/376.0 MB 4.9 MB/s eta 0:00:55
   ----------- ---------------------------- 109.3/376.0 MB 4.9 MB/s eta 0:00:55
   ----------- ---------------------------- 110.4/376.0 MB 4.9 MB/s eta 0:00:55
   ----------- ---------------------------- 111.7/376.0 MB 4.9 MB/s eta 0:00:54
   ----------- ---------------------------- 112.7/376.0 MB 4.9 MB/s eta 0:00:54
   ------------ --------------------------- 114.0/376.0 MB 4.9 MB/s eta 0:00:54
   ------------ --------------------------- 115.1/376.0 MB 4.9 MB/s eta 0:00:53
   ------------ --------------------------- 115.9/376.0 MB 4.9 MB/s eta 0:00:53
   ------------ --------------------------- 116.4/376.0 MB 4.9 MB/s eta 0:00:53
   ------------ --------------------------- 117.4/376.0 MB 4.9 MB/s eta 0:00:53
   ------------ --------------------------- 118.8/376.0 MB 4.9 MB/s eta 0:00:53
   ------------ --------------------------- 119.8/376.0 MB 4.9 MB/s eta 0:00:52
   ------------ --------------------------- 120.6/376.0 MB 4.9 MB/s eta 0:00:53
   ------------ --------------------------- 121.4/376.0 MB 4.9 MB/s eta 0:00:52
   ------------- -------------------------- 122.7/376.0 MB 4.9 MB/s eta 0:00:52
   ------------- -------------------------- 124.3/376.0 MB 4.9 MB/s eta 0:00:52
   ------------- -------------------------- 125.6/376.0 MB 4.9 MB/s eta 0:00:51
   ------------- -------------------------- 126.4/376.0 MB 4.9 MB/s eta 0:00:51
   ------------- -------------------------- 126.9/376.0 MB 4.9 MB/s eta 0:00:51
   ------------- -------------------------- 127.9/376.0 MB 4.9 MB/s eta 0:00:51
   ------------- -------------------------- 129.0/376.0 MB 4.9 MB/s eta 0:00:51
   ------------- -------------------------- 130.5/376.0 MB 4.9 MB/s eta 0:00:50
   -------------- ------------------------- 131.6/376.0 MB 4.9 MB/s eta 0:00:50
   -------------- ------------------------- 132.1/376.0 MB 4.9 MB/s eta 0:00:50
   -------------- ------------------------- 132.9/376.0 MB 4.9 MB/s eta 0:00:50
   -------------- ------------------------- 134.0/376.0 MB 4.9 MB/s eta 0:00:50
   -------------- ------------------------- 134.5/376.0 MB 4.9 MB/s eta 0:00:50
   -------------- ------------------------- 135.5/376.0 MB 4.9 MB/s eta 0:00:50
   -------------- ------------------------- 136.6/376.0 MB 4.9 MB/s eta 0:00:49
   -------------- ------------------------- 138.1/376.0 MB 4.9 MB/s eta 0:00:49
   -------------- ------------------------- 139.5/376.0 MB 4.9 MB/s eta 0:00:49
   -------------- ------------------------- 139.7/376.0 MB 4.9 MB/s eta 0:00:49
   -------------- ------------------------- 140.0/376.0 MB 4.9 MB/s eta 0:00:49
   -------------- ------------------------- 140.5/376.0 MB 4.9 MB/s eta 0:00:49
   --------------- ------------------------ 141.3/376.0 MB 4.8 MB/s eta 0:00:49
   --------------- ------------------------ 141.8/376.0 MB 4.8 MB/s eta 0:00:49
   --------------- ------------------------ 142.3/376.0 MB 4.8 MB/s eta 0:00:49
   --------------- ------------------------ 143.1/376.0 MB 4.8 MB/s eta 0:00:49
   --------------- ------------------------ 143.9/376.0 MB 4.7 MB/s eta 0:00:50
   --------------- ------------------------ 144.4/376.0 MB 4.7 MB/s eta 0:00:50
   --------------- ------------------------ 145.0/376.0 MB 4.7 MB/s eta 0:00:50
   --------------- ------------------------ 145.5/376.0 MB 4.6 MB/s eta 0:00:50
   --------------- ------------------------ 145.8/376.0 MB 4.6 MB/s eta 0:00:50
   --------------- ------------------------ 146.0/376.0 MB 4.6 MB/s eta 0:00:50
   --------------- ------------------------ 146.0/376.0 MB 4.6 MB/s eta 0:00:50
   --------------- ------------------------ 146.3/376.0 MB 4.6 MB/s eta 0:00:51
   --------------- ------------------------ 146.3/376.0 MB 4.6 MB/s eta 0:00:51
   --------------- ------------------------ 146.5/376.0 MB 4.5 MB/s eta 0:00:51
   --------------- ------------------------ 146.8/376.0 MB 4.5 MB/s eta 0:00:52
   --------------- ------------------------ 147.3/376.0 MB 4.4 MB/s eta 0:00:52
   --------------- ------------------------ 148.1/376.0 MB 4.5 MB/s eta 0:00:52
   --------------- ------------------------ 148.9/376.0 MB 4.5 MB/s eta 0:00:51
   --------------- ------------------------ 149.4/376.0 MB 4.5 MB/s eta 0:00:51
   --------------- ------------------------ 149.7/376.0 MB 4.5 MB/s eta 0:00:51
   --------------- ------------------------ 150.2/376.0 MB 4.4 MB/s eta 0:00:51
   ---------------- ----------------------- 150.7/376.0 MB 4.4 MB/s eta 0:00:51
   ---------------- ----------------------- 151.8/376.0 MB 4.4 MB/s eta 0:00:51
   ---------------- ----------------------- 152.6/376.0 MB 4.4 MB/s eta 0:00:51
   ---------------- ----------------------- 153.9/376.0 MB 4.4 MB/s eta 0:00:51
   ---------------- ----------------------- 155.2/376.0 MB 4.4 MB/s eta 0:00:51
   ---------------- ----------------------- 156.0/376.0 MB 4.4 MB/s eta 0:00:51
   ---------------- ----------------------- 156.8/376.0 MB 4.4 MB/s eta 0:00:50
   ---------------- ----------------------- 157.5/376.0 MB 4.4 MB/s eta 0:00:50
   ---------------- ----------------------- 158.9/376.0 MB 4.4 MB/s eta 0:00:50
   ----------------- ---------------------- 159.9/376.0 MB 4.4 MB/s eta 0:00:49
   ----------------- ---------------------- 160.7/376.0 MB 4.4 MB/s eta 0:00:50
   ----------------- ---------------------- 161.0/376.0 MB 4.4 MB/s eta 0:00:50
   ----------------- ---------------------- 161.2/376.0 MB 4.3 MB/s eta 0:00:50
   ----------------- ---------------------- 161.7/376.0 MB 4.3 MB/s eta 0:00:50
   ----------------- ---------------------- 162.0/376.0 MB 4.3 MB/s eta 0:00:50
   ----------------- ---------------------- 162.8/376.0 MB 4.3 MB/s eta 0:00:50
   ----------------- ---------------------- 163.3/376.0 MB 4.3 MB/s eta 0:00:51
   ----------------- ---------------------- 163.8/376.0 MB 4.2 MB/s eta 0:00:51
   ----------------- ---------------------- 164.6/376.0 MB 4.2 MB/s eta 0:00:50
   ----------------- ---------------------- 165.4/376.0 MB 4.2 MB/s eta 0:00:50
   ----------------- ---------------------- 166.5/376.0 MB 4.2 MB/s eta 0:00:50
   ----------------- ---------------------- 167.8/376.0 MB 4.2 MB/s eta 0:00:50
   ------------------ --------------------- 169.3/376.0 MB 4.2 MB/s eta 0:00:49
   ------------------ --------------------- 170.9/376.0 MB 4.3 MB/s eta 0:00:49
   ------------------ --------------------- 172.2/376.0 MB 4.3 MB/s eta 0:00:48
   ------------------ --------------------- 172.8/376.0 MB 4.3 MB/s eta 0:00:48
   ------------------ --------------------- 172.8/376.0 MB 4.3 MB/s eta 0:00:48
   ------------------ --------------------- 173.3/376.0 MB 4.2 MB/s eta 0:00:49
   ------------------ --------------------- 174.1/376.0 MB 4.2 MB/s eta 0:00:49
   ------------------ --------------------- 174.9/376.0 MB 4.2 MB/s eta 0:00:49
   ------------------ --------------------- 175.6/376.0 MB 4.2 MB/s eta 0:00:48
   ------------------ --------------------- 176.4/376.0 MB 4.2 MB/s eta 0:00:48
   ------------------ --------------------- 177.2/376.0 MB 4.2 MB/s eta 0:00:48
   ------------------ --------------------- 178.0/376.0 MB 4.1 MB/s eta 0:00:48
   ------------------- -------------------- 179.3/376.0 MB 4.1 MB/s eta 0:00:48
   ------------------- -------------------- 180.4/376.0 MB 4.2 MB/s eta 0:00:47
   ------------------- -------------------- 181.9/376.0 MB 4.2 MB/s eta 0:00:47
   ------------------- -------------------- 182.5/376.0 MB 4.2 MB/s eta 0:00:47
   ------------------- -------------------- 183.2/376.0 MB 4.2 MB/s eta 0:00:46
   ------------------- -------------------- 184.0/376.0 MB 4.2 MB/s eta 0:00:46
   ------------------- -------------------- 185.3/376.0 MB 4.2 MB/s eta 0:00:46
   ------------------- -------------------- 186.9/376.0 MB 4.2 MB/s eta 0:00:46
   -------------------- ------------------- 188.2/376.0 MB 4.2 MB/s eta 0:00:46
   -------------------- ------------------- 189.0/376.0 MB 4.2 MB/s eta 0:00:45
   -------------------- ------------------- 190.1/376.0 MB 4.2 MB/s eta 0:00:45
   -------------------- ------------------- 191.1/376.0 MB 4.2 MB/s eta 0:00:45
   -------------------- ------------------- 191.6/376.0 MB 4.2 MB/s eta 0:00:45
   -------------------- ------------------- 192.4/376.0 MB 4.2 MB/s eta 0:00:45
   -------------------- ------------------- 193.7/376.0 MB 4.1 MB/s eta 0:00:44
   -------------------- ------------------- 195.3/376.0 MB 4.2 MB/s eta 0:00:44
   -------------------- ------------------- 196.3/376.0 MB 4.2 MB/s eta 0:00:44
   --------------------- ------------------ 197.4/376.0 MB 4.2 MB/s eta 0:00:43
   --------------------- ------------------ 198.2/376.0 MB 4.2 MB/s eta 0:00:43
   --------------------- ------------------ 199.0/376.0 MB 4.2 MB/s eta 0:00:43
   --------------------- ------------------ 199.5/376.0 MB 4.1 MB/s eta 0:00:43
   --------------------- ------------------ 200.5/376.0 MB 4.1 MB/s eta 0:00:43
   --------------------- ------------------ 201.6/376.0 MB 4.1 MB/s eta 0:00:43
   --------------------- ------------------ 202.9/376.0 MB 4.2 MB/s eta 0:00:42
   --------------------- ------------------ 203.7/376.0 MB 4.2 MB/s eta 0:00:42
   --------------------- ------------------ 204.5/376.0 MB 4.2 MB/s eta 0:00:42
   --------------------- ------------------ 205.0/376.0 MB 4.2 MB/s eta 0:00:41
   --------------------- ------------------ 205.5/376.0 MB 4.2 MB/s eta 0:00:41
   --------------------- ------------------ 206.0/376.0 MB 4.2 MB/s eta 0:00:41
   ---------------------- ----------------- 207.1/376.0 MB 4.2 MB/s eta 0:00:41
   ---------------------- ----------------- 208.1/376.0 MB 4.2 MB/s eta 0:00:41
   ---------------------- ----------------- 209.7/376.0 MB 4.2 MB/s eta 0:00:41
   ---------------------- ----------------- 211.3/376.0 MB 4.1 MB/s eta 0:00:40
   ---------------------- ----------------- 212.6/376.0 MB 4.2 MB/s eta 0:00:40
   ---------------------- ----------------- 213.6/376.0 MB 4.2 MB/s eta 0:00:40
   ---------------------- ----------------- 214.4/376.0 MB 4.2 MB/s eta 0:00:39
   ---------------------- ----------------- 215.2/376.0 MB 4.2 MB/s eta 0:00:39
   ----------------------- ---------------- 216.3/376.0 MB 4.2 MB/s eta 0:00:39
   ----------------------- ---------------- 217.6/376.0 MB 4.2 MB/s eta 0:00:38
   ----------------------- ---------------- 218.4/376.0 MB 4.2 MB/s eta 0:00:38
   ----------------------- ---------------- 218.9/376.0 MB 4.2 MB/s eta 0:00:38
   ----------------------- ---------------- 219.4/376.0 MB 4.1 MB/s eta 0:00:38
   ----------------------- ---------------- 220.2/376.0 MB 4.1 MB/s eta 0:00:38
   ----------------------- ---------------- 220.7/376.0 MB 4.1 MB/s eta 0:00:38
   ----------------------- ---------------- 221.2/376.0 MB 4.1 MB/s eta 0:00:38
   ----------------------- ---------------- 222.3/376.0 MB 4.1 MB/s eta 0:00:38
   ----------------------- ---------------- 223.3/376.0 MB 4.1 MB/s eta 0:00:38
   ----------------------- ---------------- 224.9/376.0 MB 4.1 MB/s eta 0:00:37
   ------------------------ --------------- 226.2/376.0 MB 4.1 MB/s eta 0:00:37
   ------------------------ --------------- 227.3/376.0 MB 4.1 MB/s eta 0:00:37
   ------------------------ --------------- 228.1/376.0 MB 4.1 MB/s eta 0:00:36
   ------------------------ --------------- 228.6/376.0 MB 4.1 MB/s eta 0:00:36
   ------------------------ --------------- 229.4/376.0 MB 4.1 MB/s eta 0:00:36
   ------------------------ --------------- 230.7/376.0 MB 4.1 MB/s eta 0:00:36
   ------------------------ --------------- 232.0/376.0 MB 4.1 MB/s eta 0:00:36
   ------------------------ --------------- 233.6/376.0 MB 4.1 MB/s eta 0:00:35
   ------------------------ --------------- 234.4/376.0 MB 4.1 MB/s eta 0:00:35
   ------------------------ --------------- 234.9/376.0 MB 4.1 MB/s eta 0:00:35
   ------------------------- -------------- 235.9/376.0 MB 4.1 MB/s eta 0:00:35
   ------------------------- -------------- 237.5/376.0 MB 4.1 MB/s eta 0:00:34
   ------------------------- -------------- 238.8/376.0 MB 4.1 MB/s eta 0:00:34
   ------------------------- -------------- 239.6/376.0 MB 4.1 MB/s eta 0:00:34
   ------------------------- -------------- 240.1/376.0 MB 4.1 MB/s eta 0:00:34
   ------------------------- -------------- 241.2/376.0 MB 4.1 MB/s eta 0:00:33
   ------------------------- -------------- 242.2/376.0 MB 4.1 MB/s eta 0:00:33
   ------------------------- -------------- 243.8/376.0 MB 4.1 MB/s eta 0:00:33
   -------------------------- ------------- 244.8/376.0 MB 4.1 MB/s eta 0:00:32
   -------------------------- ------------- 245.6/376.0 MB 4.1 MB/s eta 0:00:32
   -------------------------- ------------- 246.2/376.0 MB 4.1 MB/s eta 0:00:32
   -------------------------- ------------- 247.2/376.0 MB 4.1 MB/s eta 0:00:32
   -------------------------- ------------- 248.3/376.0 MB 4.1 MB/s eta 0:00:32
   -------------------------- ------------- 249.6/376.0 MB 4.1 MB/s eta 0:00:31
   -------------------------- ------------- 250.3/376.0 MB 4.1 MB/s eta 0:00:31
   -------------------------- ------------- 250.9/376.0 MB 4.1 MB/s eta 0:00:31
   -------------------------- ------------- 251.9/376.0 MB 4.1 MB/s eta 0:00:31
   -------------------------- ------------- 253.0/376.0 MB 4.1 MB/s eta 0:00:31
   --------------------------- ------------ 254.3/376.0 MB 4.1 MB/s eta 0:00:30
   --------------------------- ------------ 255.1/376.0 MB 4.1 MB/s eta 0:00:30
   --------------------------- ------------ 256.1/376.0 MB 4.1 MB/s eta 0:00:30
   --------------------------- ------------ 257.2/376.0 MB 4.1 MB/s eta 0:00:29
   --------------------------- ------------ 258.5/376.0 MB 4.1 MB/s eta 0:00:29
   --------------------------- ------------ 259.3/376.0 MB 4.1 MB/s eta 0:00:29
   --------------------------- ------------ 260.3/376.0 MB 4.1 MB/s eta 0:00:29
   --------------------------- ------------ 261.4/376.0 MB 4.1 MB/s eta 0:00:29
   --------------------------- ------------ 262.9/376.0 MB 4.1 MB/s eta 0:00:28
   ---------------------------- ----------- 264.2/376.0 MB 4.2 MB/s eta 0:00:27
   ---------------------------- ----------- 265.0/376.0 MB 4.2 MB/s eta 0:00:27
   ---------------------------- ----------- 265.8/376.0 MB 4.2 MB/s eta 0:00:27
   ---------------------------- ----------- 266.1/376.0 MB 4.2 MB/s eta 0:00:27
   ---------------------------- ----------- 266.6/376.0 MB 4.2 MB/s eta 0:00:27
   ---------------------------- ----------- 267.4/376.0 MB 4.2 MB/s eta 0:00:27
   ---------------------------- ----------- 268.7/376.0 MB 4.2 MB/s eta 0:00:26
   ---------------------------- ----------- 270.3/376.0 MB 4.2 MB/s eta 0:00:26
   ---------------------------- ----------- 271.8/376.0 MB 4.2 MB/s eta 0:00:25
   ----------------------------- ---------- 272.9/376.0 MB 4.3 MB/s eta 0:00:25
   ----------------------------- ---------- 273.9/376.0 MB 4.3 MB/s eta 0:00:24
   ----------------------------- ---------- 274.5/376.0 MB 4.3 MB/s eta 0:00:24
   ----------------------------- ---------- 275.3/376.0 MB 4.4 MB/s eta 0:00:24
   ----------------------------- ---------- 276.0/376.0 MB 4.4 MB/s eta 0:00:23
   ----------------------------- ---------- 277.3/376.0 MB 4.4 MB/s eta 0:00:23
   ----------------------------- ---------- 277.9/376.0 MB 4.4 MB/s eta 0:00:23
   ----------------------------- ---------- 278.4/376.0 MB 4.4 MB/s eta 0:00:23
   ----------------------------- ---------- 279.4/376.0 MB 4.4 MB/s eta 0:00:22
   ----------------------------- ---------- 280.5/376.0 MB 4.4 MB/s eta 0:00:22
   ------------------------------ --------- 282.1/376.0 MB 4.5 MB/s eta 0:00:22
   ------------------------------ --------- 283.4/376.0 MB 4.5 MB/s eta 0:00:21
   ------------------------------ --------- 284.4/376.0 MB 4.5 MB/s eta 0:00:21
   ------------------------------ --------- 285.2/376.0 MB 4.5 MB/s eta 0:00:21
   ------------------------------ --------- 285.7/376.0 MB 4.5 MB/s eta 0:00:20
   ------------------------------ --------- 286.5/376.0 MB 4.5 MB/s eta 0:00:20
   ------------------------------ --------- 287.8/376.0 MB 4.5 MB/s eta 0:00:20
   ------------------------------ --------- 289.4/376.0 MB 4.6 MB/s eta 0:00:19
   ------------------------------ --------- 290.7/376.0 MB 4.6 MB/s eta 0:00:19
   ------------------------------ --------- 291.2/376.0 MB 4.5 MB/s eta 0:00:19
   ------------------------------- -------- 292.0/376.0 MB 4.5 MB/s eta 0:00:19
   ------------------------------- -------- 292.8/376.0 MB 4.5 MB/s eta 0:00:19
   ------------------------------- -------- 294.1/376.0 MB 4.6 MB/s eta 0:00:18
   ------------------------------- -------- 295.7/376.0 MB 4.6 MB/s eta 0:00:18
   ------------------------------- -------- 296.7/376.0 MB 4.6 MB/s eta 0:00:18
   ------------------------------- -------- 297.5/376.0 MB 4.6 MB/s eta 0:00:18
   ------------------------------- -------- 298.3/376.0 MB 4.6 MB/s eta 0:00:17
   ------------------------------- -------- 299.1/376.0 MB 4.6 MB/s eta 0:00:17
   ------------------------------- -------- 300.4/376.0 MB 4.6 MB/s eta 0:00:17
   -------------------------------- ------- 301.5/376.0 MB 4.7 MB/s eta 0:00:16
   -------------------------------- ------- 302.0/376.0 MB 4.7 MB/s eta 0:00:16
   -------------------------------- ------- 302.3/376.0 MB 4.7 MB/s eta 0:00:16
   -------------------------------- ------- 303.0/376.0 MB 4.6 MB/s eta 0:00:16
   -------------------------------- ------- 303.8/376.0 MB 4.7 MB/s eta 0:00:16
   -------------------------------- ------- 304.9/376.0 MB 4.7 MB/s eta 0:00:16
   -------------------------------- ------- 305.9/376.0 MB 4.7 MB/s eta 0:00:16
   -------------------------------- ------- 307.8/376.0 MB 4.7 MB/s eta 0:00:15
   -------------------------------- ------- 308.5/376.0 MB 4.7 MB/s eta 0:00:15
   -------------------------------- ------- 309.3/376.0 MB 4.6 MB/s eta 0:00:15
   -------------------------------- ------- 310.1/376.0 MB 4.6 MB/s eta 0:00:15
   --------------------------------- ------ 311.2/376.0 MB 4.6 MB/s eta 0:00:15
   --------------------------------- ------ 312.7/376.0 MB 4.7 MB/s eta 0:00:14
   --------------------------------- ------ 313.8/376.0 MB 4.7 MB/s eta 0:00:14
   --------------------------------- ------ 314.3/376.0 MB 4.7 MB/s eta 0:00:14
   --------------------------------- ------ 315.1/376.0 MB 4.7 MB/s eta 0:00:13
   --------------------------------- ------ 316.1/376.0 MB 4.7 MB/s eta 0:00:13
   --------------------------------- ------ 317.5/376.0 MB 4.7 MB/s eta 0:00:13
   --------------------------------- ------ 318.8/376.0 MB 4.7 MB/s eta 0:00:13
   ---------------------------------- ----- 319.8/376.0 MB 4.7 MB/s eta 0:00:12
   ---------------------------------- ----- 320.1/376.0 MB 4.7 MB/s eta 0:00:12
   ---------------------------------- ----- 320.9/376.0 MB 4.7 MB/s eta 0:00:12
   ---------------------------------- ----- 322.2/376.0 MB 4.7 MB/s eta 0:00:12
   ---------------------------------- ----- 323.5/376.0 MB 4.7 MB/s eta 0:00:12
   ---------------------------------- ----- 324.8/376.0 MB 4.7 MB/s eta 0:00:11
   ---------------------------------- ----- 325.8/376.0 MB 4.8 MB/s eta 0:00:11
   ---------------------------------- ----- 326.6/376.0 MB 4.7 MB/s eta 0:00:11
   ---------------------------------- ----- 327.9/376.0 MB 4.7 MB/s eta 0:00:11
   ----------------------------------- ---- 329.3/376.0 MB 4.7 MB/s eta 0:00:10
   ----------------------------------- ---- 330.3/376.0 MB 4.7 MB/s eta 0:00:10
   ----------------------------------- ---- 331.4/376.0 MB 4.7 MB/s eta 0:00:10
   ----------------------------------- ---- 332.4/376.0 MB 4.7 MB/s eta 0:00:10
   ----------------------------------- ---- 333.4/376.0 MB 4.7 MB/s eta 0:00:09
   ----------------------------------- ---- 334.5/376.0 MB 4.7 MB/s eta 0:00:09
   ----------------------------------- ---- 335.8/376.0 MB 4.7 MB/s eta 0:00:09
   ----------------------------------- ---- 336.1/376.0 MB 4.7 MB/s eta 0:00:09
   ----------------------------------- ---- 336.6/376.0 MB 4.7 MB/s eta 0:00:09
   ----------------------------------- ---- 337.4/376.0 MB 4.7 MB/s eta 0:00:09
   ----------------------------------- ---- 337.9/376.0 MB 4.7 MB/s eta 0:00:09
   ------------------------------------ --- 338.7/376.0 MB 4.7 MB/s eta 0:00:08
   ------------------------------------ --- 340.0/376.0 MB 4.7 MB/s eta 0:00:08
   ------------------------------------ --- 341.3/376.0 MB 4.7 MB/s eta 0:00:08
   ------------------------------------ --- 342.9/376.0 MB 4.7 MB/s eta 0:00:08
   ------------------------------------ --- 344.5/376.0 MB 4.7 MB/s eta 0:00:07
   ------------------------------------ --- 345.0/376.0 MB 4.7 MB/s eta 0:00:07
   ------------------------------------ --- 345.5/376.0 MB 4.7 MB/s eta 0:00:07
   ------------------------------------ --- 346.0/376.0 MB 4.7 MB/s eta 0:00:07
   ------------------------------------ --- 347.1/376.0 MB 4.7 MB/s eta 0:00:07
   ------------------------------------- -- 348.4/376.0 MB 4.8 MB/s eta 0:00:06
   ------------------------------------- -- 349.7/376.0 MB 4.8 MB/s eta 0:00:06
   ------------------------------------- -- 350.7/376.0 MB 4.8 MB/s eta 0:00:06
   ------------------------------------- -- 351.5/376.0 MB 4.8 MB/s eta 0:00:06
   ------------------------------------- -- 352.6/376.0 MB 4.7 MB/s eta 0:00:05
   ------------------------------------- -- 353.4/376.0 MB 4.7 MB/s eta 0:00:05
   ------------------------------------- -- 353.9/376.0 MB 4.7 MB/s eta 0:00:05
   ------------------------------------- -- 354.7/376.0 MB 4.7 MB/s eta 0:00:05
   ------------------------------------- -- 355.7/376.0 MB 4.7 MB/s eta 0:00:05
   ------------------------------------- -- 356.3/376.0 MB 4.7 MB/s eta 0:00:05
   ------------------------------------- -- 356.8/376.0 MB 4.7 MB/s eta 0:00:05
   -------------------------------------- - 357.8/376.0 MB 4.7 MB/s eta 0:00:04
   -------------------------------------- - 359.1/376.0 MB 4.7 MB/s eta 0:00:04
   -------------------------------------- - 360.4/376.0 MB 4.7 MB/s eta 0:00:04
   -------------------------------------- - 361.8/376.0 MB 4.7 MB/s eta 0:00:04
   -------------------------------------- - 362.8/376.0 MB 4.7 MB/s eta 0:00:03
   -------------------------------------- - 363.6/376.0 MB 4.8 MB/s eta 0:00:03
   -------------------------------------- - 364.6/376.0 MB 4.8 MB/s eta 0:00:03
   -------------------------------------- - 366.0/376.0 MB 4.8 MB/s eta 0:00:03
   ---------------------------------------  367.3/376.0 MB 4.8 MB/s eta 0:00:02
   ---------------------------------------  368.1/376.0 MB 4.7 MB/s eta 0:00:02
   ---------------------------------------  368.8/376.0 MB 4.7 MB/s eta 0:00:02
   ---------------------------------------  369.4/376.0 MB 4.7 MB/s eta 0:00:02
   ---------------------------------------  370.1/376.0 MB 4.7 MB/s eta 0:00:02
   ---------------------------------------  371.5/376.0 MB 4.7 MB/s eta 0:00:01
   ---------------------------------------  372.8/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------  374.3/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------  375.7/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------  375.9/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------  375.9/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------  375.9/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------  375.9/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------  375.9/376.0 MB 4.8 MB/s eta 0:00:01
   ---------------------------------------- 376.0/376.0 MB 4.6 MB/s eta 0:00:00
Downloading ml_dtypes-0.5.1-cp312-cp312-win_amd64.whl (210 kB)
Downloading tensorboard-2.19.0-py3-none-any.whl (5.5 MB)
   ---------------------------------------- 0.0/5.5 MB ? eta -:--:--
   ----- ---------------------------------- 0.8/5.5 MB 4.8 MB/s eta 0:00:01
   --------------- ------------------------ 2.1/5.5 MB 5.3 MB/s eta 0:00:01
   -------------------------- ------------- 3.7/5.5 MB 6.1 MB/s eta 0:00:01
   -------------------------------------- - 5.2/5.5 MB 6.6 MB/s eta 0:00:01
   ---------------------------------------- 5.5/5.5 MB 6.5 MB/s eta 0:00:00
Downloading keras-3.9.2-py3-none-any.whl (1.3 MB)
   ---------------------------------------- 0.0/1.3 MB ? eta -:--:--
   ---------------------------------------- 1.3/1.3 MB 8.6 MB/s eta 0:00:00
Downloading flatbuffers-25.2.10-py2.py3-none-any.whl (30 kB)
Downloading h5py-3.13.0-cp312-cp312-win_amd64.whl (3.0 MB)
   ---------------------------------------- 0.0/3.0 MB ? eta -:--:--
   ------------------------ --------------- 1.8/3.0 MB 9.1 MB/s eta 0:00:01
   ---------------------------------------- 3.0/3.0 MB 7.2 MB/s eta 0:00:00
Downloading optree-0.15.0-cp312-cp312-win_amd64.whl (307 kB)
Installing collected packages: flatbuffers, optree, ml-dtypes, h5py, tensorboard, keras, tensorflow

  Attempting uninstall: flatbuffers

    Found existing installation: flatbuffers 23.5.26

    Uninstalling flatbuffers-23.5.26:

      Successfully uninstalled flatbuffers-23.5.26

   ---------------------------------------- 0/7 [flatbuffers]
   ----- ---------------------------------- 1/7 [optree]
   ----- ---------------------------------- 1/7 [optree]
   ----- ---------------------------------- 1/7 [optree]
  Attempting uninstall: ml-dtypes
   ----- ---------------------------------- 1/7 [optree]
    Found existing installation: ml-dtypes 0.3.2
   ----- ---------------------------------- 1/7 [optree]
    Uninstalling ml-dtypes-0.3.2:
   ----- ---------------------------------- 1/7 [optree]
      Successfully uninstalled ml-dtypes-0.3.2
   ----- ---------------------------------- 1/7 [optree]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------- ---------------------------- 2/7 [ml-dtypes]
  Attempting uninstall: h5py
   ----------- ---------------------------- 2/7 [ml-dtypes]
    Found existing installation: h5py 3.10.0
   ----------- ---------------------------- 2/7 [ml-dtypes]
    Uninstalling h5py-3.10.0:
   ----------- ---------------------------- 2/7 [ml-dtypes]
   ----------------- ---------------------- 3/7 [h5py]
      Successfully uninstalled h5py-3.10.0
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
   ----------------- ---------------------- 3/7 [h5py]
Note: you may need to restart the kernel to use updated packages.
In [10]:
 pip install --upgrade pip
Requirement already satisfied: pip in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (24.3.1)
Collecting pip
  Downloading pip-25.1.1-py3-none-any.whl.metadata (3.6 kB)
Downloading pip-25.1.1-py3-none-any.whl (1.8 MB)
   ---------------------------------------- 0.0/1.8 MB ? eta -:--:--
   ---------------------------------------- 1.8/1.8 MB 16.8 MB/s eta 0:00:00
Installing collected packages: pip
  Attempting uninstall: pip
    Found existing installation: pip 24.3.1
    Uninstalling pip-24.3.1:
      Successfully uninstalled pip-24.3.1
Successfully installed pip-25.1.1
Note: you may need to restart the kernel to use updated packages.
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''4. Design and implement a CNN for Image Classification a) Select a suitable image classification
dataset (medical imaging, agricultural, etc.). b) Optimized with different hyper-parameters including
learning rate, filter size, no. of layers, optimizers, dropouts, etc.
In [ ]:
 
In [3]:
!pip install tensorflow-datasets
Collecting tensorflow-datasets
  Downloading tensorflow_datasets-4.9.8-py3-none-any.whl.metadata (11 kB)
Requirement already satisfied: absl-py in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (2.1.0)
Requirement already satisfied: dm-tree in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (0.1.8)
Collecting etils>=1.9.1 (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= "3.11"->tensorflow-datasets)
  Downloading etils-1.12.2-py3-none-any.whl.metadata (6.5 kB)
Collecting immutabledict (from tensorflow-datasets)
  Downloading immutabledict-4.2.1-py3-none-any.whl.metadata (3.5 kB)
Requirement already satisfied: numpy in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (1.26.3)
Collecting promise (from tensorflow-datasets)
  Downloading promise-2.3.tar.gz (19 kB)
  Preparing metadata (setup.py): started
  Preparing metadata (setup.py): finished with status 'done'
Requirement already satisfied: protobuf>=3.20 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (4.21.12)
Requirement already satisfied: psutil in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (5.9.8)
Collecting pyarrow (from tensorflow-datasets)
  Downloading pyarrow-20.0.0-cp312-cp312-win_amd64.whl.metadata (3.4 kB)
Requirement already satisfied: requests>=2.19.0 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (2.31.0)
Collecting simple_parsing (from tensorflow-datasets)
  Downloading simple_parsing-0.1.7-py3-none-any.whl.metadata (7.3 kB)
Collecting tensorflow-metadata (from tensorflow-datasets)
  Downloading tensorflow_metadata-1.17.1-py3-none-any.whl.metadata (2.6 kB)
Requirement already satisfied: termcolor in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (2.4.0)
Collecting toml (from tensorflow-datasets)
  Downloading toml-0.10.2-py2.py3-none-any.whl.metadata (7.1 kB)
Requirement already satisfied: tqdm in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (4.66.4)
Requirement already satisfied: wrapt in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tensorflow-datasets) (1.16.0)
Collecting einops (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= "3.11"->tensorflow-datasets)
  Downloading einops-0.8.1-py3-none-any.whl.metadata (13 kB)
Collecting fsspec (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= "3.11"->tensorflow-datasets)
  Downloading fsspec-2025.3.2-py3-none-any.whl.metadata (11 kB)
Collecting importlib_resources (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= "3.11"->tensorflow-datasets)
  Downloading importlib_resources-6.5.2-py3-none-any.whl.metadata (3.9 kB)
Requirement already satisfied: typing_extensions in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= "3.11"->tensorflow-datasets) (4.10.0)
Collecting zipp (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= "3.11"->tensorflow-datasets)
  Downloading zipp-3.21.0-py3-none-any.whl.metadata (3.7 kB)
Requirement already satisfied: charset-normalizer<4,>=2 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests>=2.19.0->tensorflow-datasets) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests>=2.19.0->tensorflow-datasets) (3.6)
Requirement already satisfied: urllib3<3,>=1.21.1 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests>=2.19.0->tensorflow-datasets) (2.2.1)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from requests>=2.19.0->tensorflow-datasets) (2024.2.2)
Requirement already satisfied: six in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from promise->tensorflow-datasets) (1.16.0)
Collecting docstring-parser<1.0,>=0.15 (from simple_parsing->tensorflow-datasets)
  Downloading docstring_parser-0.16-py3-none-any.whl.metadata (3.0 kB)
Collecting googleapis-common-protos<2,>=1.56.4 (from tensorflow-metadata->tensorflow-datasets)
  Downloading googleapis_common_protos-1.70.0-py3-none-any.whl.metadata (9.3 kB)
Collecting protobuf>=3.20 (from tensorflow-datasets)
  Downloading protobuf-5.29.4-cp310-abi3-win_amd64.whl.metadata (592 bytes)
Requirement already satisfied: colorama in c:\users\lenovo\appdata\local\programs\python\python312\lib\site-packages (from tqdm->tensorflow-datasets) (0.4.6)
Downloading tensorflow_datasets-4.9.8-py3-none-any.whl (5.3 MB)
   ---------------------------------------- 0.0/5.3 MB ? eta -:--:--
   --------------------------- ------------ 3.7/5.3 MB 19.8 MB/s eta 0:00:01
   ------------------------------------- -- 5.0/5.3 MB 12.1 MB/s eta 0:00:01
   ---------------------------------------- 5.3/5.3 MB 10.4 MB/s eta 0:00:00
Downloading etils-1.12.2-py3-none-any.whl (167 kB)
Downloading einops-0.8.1-py3-none-any.whl (64 kB)
Downloading fsspec-2025.3.2-py3-none-any.whl (194 kB)
Downloading immutabledict-4.2.1-py3-none-any.whl (4.7 kB)
Downloading importlib_resources-6.5.2-py3-none-any.whl (37 kB)
Downloading pyarrow-20.0.0-cp312-cp312-win_amd64.whl (25.7 MB)
   ---------------------------------------- 0.0/25.7 MB ? eta -:--:--
   -- ------------------------------------- 1.6/25.7 MB 7.6 MB/s eta 0:00:04
   --- ------------------------------------ 2.4/25.7 MB 5.4 MB/s eta 0:00:05
   ---- ----------------------------------- 2.6/25.7 MB 4.2 MB/s eta 0:00:06
   ---- ----------------------------------- 2.9/25.7 MB 3.4 MB/s eta 0:00:07
   ---- ----------------------------------- 3.1/25.7 MB 3.1 MB/s eta 0:00:08
   ----- ---------------------------------- 3.7/25.7 MB 2.8 MB/s eta 0:00:08
   ------ --------------------------------- 4.5/25.7 MB 2.9 MB/s eta 0:00:08
   -------- ------------------------------- 5.8/25.7 MB 3.2 MB/s eta 0:00:07
   ----------- ---------------------------- 7.1/25.7 MB 3.6 MB/s eta 0:00:06
   ------------- -------------------------- 8.7/25.7 MB 4.0 MB/s eta 0:00:05
   ---------------- ----------------------- 10.7/25.7 MB 4.5 MB/s eta 0:00:04
   ------------------ --------------------- 11.8/25.7 MB 4.6 MB/s eta 0:00:04
   ------------------- -------------------- 12.3/25.7 MB 4.5 MB/s eta 0:00:03
   ------------------- -------------------- 12.6/25.7 MB 4.3 MB/s eta 0:00:04
   -------------------- ------------------- 13.4/25.7 MB 4.1 MB/s eta 0:00:03
   ---------------------- ----------------- 14.2/25.7 MB 4.1 MB/s eta 0:00:03
   ------------------------ --------------- 15.5/25.7 MB 4.2 MB/s eta 0:00:03
   -------------------------- ------------- 16.8/25.7 MB 4.4 MB/s eta 0:00:03
   ----------------------------- ---------- 18.6/25.7 MB 4.6 MB/s eta 0:00:02
   ------------------------------- -------- 19.9/25.7 MB 4.7 MB/s eta 0:00:02
   -------------------------------- ------- 21.0/25.7 MB 4.7 MB/s eta 0:00:01
   --------------------------------- ------ 21.8/25.7 MB 4.7 MB/s eta 0:00:01
   ---------------------------------- ----- 22.3/25.7 MB 4.6 MB/s eta 0:00:01
   ----------------------------------- ---- 23.1/25.7 MB 4.5 MB/s eta 0:00:01
   ------------------------------------- -- 23.9/25.7 MB 4.5 MB/s eta 0:00:01
   -------------------------------------- - 24.9/25.7 MB 4.5 MB/s eta 0:00:01
   ---------------------------------------- 25.7/25.7 MB 4.5 MB/s eta 0:00:00
Downloading simple_parsing-0.1.7-py3-none-any.whl (112 kB)
Downloading docstring_parser-0.16-py3-none-any.whl (36 kB)
Downloading tensorflow_metadata-1.17.1-py3-none-any.whl (31 kB)
Downloading googleapis_common_protos-1.70.0-py3-none-any.whl (294 kB)
Downloading protobuf-5.29.4-cp310-abi3-win_amd64.whl (434 kB)
Downloading toml-0.10.2-py2.py3-none-any.whl (16 kB)
Downloading zipp-3.21.0-py3-none-any.whl (9.6 kB)
Building wheels for collected packages: promise
  Building wheel for promise (setup.py): started
  Building wheel for promise (setup.py): finished with status 'done'
  Created wheel for promise: filename=promise-2.3-py3-none-any.whl size=21545 sha256=bf526d1e550cce2d8a23817c4a1af97ede4afdc697f26f968846de09f2a0d730
  Stored in directory: c:\users\lenovo\appdata\local\pip\cache\wheels\e7\e6\28\864bdfee5339dbd6ddcb5a186286a8e217648ec198bdf0097d
Successfully built promise
Installing collected packages: zipp, toml, pyarrow, protobuf, promise, importlib_resources, immutabledict, fsspec, etils, einops, docstring-parser, simple_parsing, googleapis-common-protos, tensorflow-metadata, tensorflow-datasets

   -- -------------------------------------  1/15 [toml]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
   ----- ----------------------------------  2/15 [pyarrow]
  Attempting uninstall: protobuf
   ----- ----------------------------------  2/15 [pyarrow]
    Found existing installation: protobuf 4.21.12
   ----- ----------------------------------  2/15 [pyarrow]
   -------- -------------------------------  3/15 [protobuf]
    Uninstalling protobuf-4.21.12:
   -------- -------------------------------  3/15 [protobuf]
      Successfully uninstalled protobuf-4.21.12
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   -------- -------------------------------  3/15 [protobuf]
   ---------- -----------------------------  4/15 [promise]
   ------------- --------------------------  5/15 [importlib_resources]
   ------------- --------------------------  5/15 [importlib_resources]
   ------------- --------------------------  5/15 [importlib_resources]
   ---------------- -----------------------  6/15 [immutabledict]
   ------------------ ---------------------  7/15 [fsspec]
   ------------------ ---------------------  7/15 [fsspec]
   ------------------ ---------------------  7/15 [fsspec]
   ------------------ ---------------------  7/15 [fsspec]
   ------------------ ---------------------  7/15 [fsspec]
   ------------------ ---------------------  7/15 [fsspec]
   ------------------ ---------------------  7/15 [fsspec]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   --------------------- ------------------  8/15 [etils]
   ------------------------ ---------------  9/15 [einops]
   ------------------------ ---------------  9/15 [einops]
   ------------------------ ---------------  9/15 [einops]
   -------------------------- ------------- 10/15 [docstring-parser]
   -------------------------- ------------- 10/15 [docstring-parser]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   ----------------------------- ---------- 11/15 [simple_parsing]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   -------------------------------- ------- 12/15 [googleapis-common-protos]
   ---------------------------------- ----- 13/15 [tensorflow-metadata]
   ---------------------------------- ----- 13/15 [tensorflow-metadata]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ------------------------------------- -- 14/15 [tensorflow-datasets]
   ---------------------------------------- 15/15 [tensorflow-datasets]

Successfully installed docstring-parser-0.16 einops-0.8.1 etils-1.12.2 fsspec-2025.3.2 googleapis-common-protos-1.70.0 immutabledict-4.2.1 importlib_resources-6.5.2 promise-2.3 protobuf-5.29.4 pyarrow-20.0.0 simple_parsing-0.1.7 tensorflow-datasets-4.9.8 tensorflow-metadata-1.17.1 toml-0.10.2 zipp-3.21.0
  DEPRECATION: Building 'promise' using the legacy setup.py bdist_wheel mechanism, which will be removed in a future version. pip 25.3 will enforce this behaviour change. A possible replacement is to use the standardized build interface by setting the `--use-pep517` option, (possibly combined with `--no-build-isolation`), or adding a `pyproject.toml` file to the source tree of 'promise'. Discussion can be found at https://github.com/pypa/pip/issues/6334
  WARNING: Failed to remove contents in a temporary directory 'C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\google\~upb'.
  You can safely remove it manually.
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
mysql-connector-python 8.2.0 requires protobuf<=4.21.12,>=4.21.1, but you have protobuf 5.29.4 which is incompatible.
tensorflow-intel 2.16.0rc0 requires ml-dtypes~=0.3.1, but you have ml-dtypes 0.5.1 which is incompatible.
tensorflow-intel 2.16.0rc0 requires protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3, but you have protobuf 5.29.4 which is incompatible.
In [ ]:
 
In [7]:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import confusion_matrix


# Load dataset
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

# Reshape for CNN input and normalize
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0

# One-hot encode labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)



model = Sequential([
    Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)),
    MaxPooling2D(pool_size=(2,2)),
    Dropout(0.25),

    Conv2D(64, (3,3), activation='relu'),
    MaxPooling2D(pool_size=(2,2)),
    Dropout(0.25),

    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')  # 10 classes
])

optimizer = Adam(learning_rate=0.001)

model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Train model
model.fit(X_train, y_train_cat, epochs=10, batch_size=64, validation_split=0.2)



# Evaluate
test_loss, test_acc = model.evaluate(X_test, y_test_cat)
print(f"Test Accuracy: {test_acc*100:.2f}%")

# Predict classes
y_pred = np.argmax(model.predict(X_test), axis=1)

# Confusion matrix
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(8,6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Purples')
plt.title('Confusion Matrix')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
29515/29515 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26421880/26421880 ━━━━━━━━━━━━━━━━━━━━ 6s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
5148/5148 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4422102/4422102 ━━━━━━━━━━━━━━━━━━━━ 1s 0us/step
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(
Epoch 1/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 15s 16ms/step - accuracy: 0.6411 - loss: 0.9741 - val_accuracy: 0.8486 - val_loss: 0.4277
Epoch 2/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 11s 15ms/step - accuracy: 0.8252 - loss: 0.4858 - val_accuracy: 0.8674 - val_loss: 0.3582
Epoch 3/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 11s 15ms/step - accuracy: 0.8483 - loss: 0.4172 - val_accuracy: 0.8765 - val_loss: 0.3259
Epoch 4/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 13s 17ms/step - accuracy: 0.8626 - loss: 0.3774 - val_accuracy: 0.8867 - val_loss: 0.3063
Epoch 5/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 13s 17ms/step - accuracy: 0.8718 - loss: 0.3530 - val_accuracy: 0.8977 - val_loss: 0.2817
Epoch 6/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 12s 16ms/step - accuracy: 0.8796 - loss: 0.3289 - val_accuracy: 0.8969 - val_loss: 0.2740
Epoch 7/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 13s 18ms/step - accuracy: 0.8821 - loss: 0.3166 - val_accuracy: 0.8992 - val_loss: 0.2745
Epoch 8/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 16s 21ms/step - accuracy: 0.8900 - loss: 0.3065 - val_accuracy: 0.9017 - val_loss: 0.2687
Epoch 9/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 14s 18ms/step - accuracy: 0.8920 - loss: 0.2938 - val_accuracy: 0.9057 - val_loss: 0.2557
Epoch 10/10
750/750 ━━━━━━━━━━━━━━━━━━━━ 14s 19ms/step - accuracy: 0.8923 - loss: 0.2858 - val_accuracy: 0.9075 - val_loss: 0.2491
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.9018 - loss: 0.2681
Test Accuracy: 90.13%
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
#OR 
In [ ]:
 
In [ ]:
#CNN Image Classification using CIFAR-10 - CIFAR-10 dataset, which is a well-known benchmark dataset containing 60,000 32x32 color images in 10 classes (like airplane, dog, car, etc.).
In [ ]:
 
In [9]:
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import numpy as np


# Load the dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Normalize the images
x_train = x_train / 255.0
x_test = x_test / 255.0

# Define class names
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']


model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),

    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),

    layers.Conv2D(64, (3, 3), activation='relu'),
    
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10)  # 10 classes
])



model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

history = model.fit(x_train, y_train, epochs=10, 
                    validation_data=(x_test, y_test))


test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print(f"Test Accuracy: {test_acc:.2f}")



plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.legend()
plt.title('Model Accuracy')
plt.show()


from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np


#confusiom matrix



# Use the trained model to get predictions
y_pred_logits = model.predict(x_test)
y_pred = np.argmax(y_pred_logits, axis=1)

# Flatten the true labels (they are in shape (10000,1))
y_true = y_test.flatten()



# Generate the confusion matrix
cm = confusion_matrix(y_true, y_pred)

# Plot the confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", 
            xticklabels=class_names, yticklabels=class_names)
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.title('Confusion Matrix - CIFAR-10')
plt.show()



print("Classification Report:\n")
print(classification_report(y_true, y_pred, target_names=class_names))
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(
Epoch 1/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 19s 11ms/step - accuracy: 0.3515 - loss: 1.7454 - val_accuracy: 0.5678 - val_loss: 1.2214
Epoch 2/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 17s 11ms/step - accuracy: 0.5856 - loss: 1.1625 - val_accuracy: 0.6177 - val_loss: 1.0944
Epoch 3/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 19s 12ms/step - accuracy: 0.6575 - loss: 0.9758 - val_accuracy: 0.6608 - val_loss: 0.9788
Epoch 4/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 18s 12ms/step - accuracy: 0.6918 - loss: 0.8773 - val_accuracy: 0.6725 - val_loss: 0.9313
Epoch 5/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 17s 11ms/step - accuracy: 0.7163 - loss: 0.8095 - val_accuracy: 0.6721 - val_loss: 0.9379
Epoch 6/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 15s 10ms/step - accuracy: 0.7406 - loss: 0.7430 - val_accuracy: 0.6961 - val_loss: 0.8765
Epoch 7/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 15s 10ms/step - accuracy: 0.7596 - loss: 0.6835 - val_accuracy: 0.6928 - val_loss: 0.9034
Epoch 8/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 16s 10ms/step - accuracy: 0.7826 - loss: 0.6203 - val_accuracy: 0.7132 - val_loss: 0.8590
Epoch 9/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 18s 11ms/step - accuracy: 0.7918 - loss: 0.5903 - val_accuracy: 0.7083 - val_loss: 0.8857
Epoch 10/10
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 17s 11ms/step - accuracy: 0.8095 - loss: 0.5411 - val_accuracy: 0.7185 - val_loss: 0.8501
313/313 - 1s - 4ms/step - accuracy: 0.7185 - loss: 0.8501
Test Accuracy: 0.72
No description has been provided for this image
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step
No description has been provided for this image
Classification Report:

              precision    recall  f1-score   support

    airplane       0.75      0.73      0.74      1000
  automobile       0.80      0.84      0.82      1000
        bird       0.57      0.67      0.61      1000
         cat       0.56      0.54      0.55      1000
        deer       0.68      0.67      0.67      1000
         dog       0.63      0.62      0.63      1000
        frog       0.80      0.80      0.80      1000
       horse       0.80      0.75      0.77      1000
        ship       0.82      0.81      0.81      1000
       truck       0.82      0.76      0.79      1000

    accuracy                           0.72     10000
   macro avg       0.72      0.72      0.72     10000
weighted avg       0.72      0.72      0.72     10000

In [ ]:
 
In [ ]:
 
In [ ]:
'''5. Design and implement Deep Convolutional GAN to generate images of faces/digits from a set of
given images.
In [ ]:
 
In [10]:
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt


(x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32')
x_train = (x_train - 127.5) / 127.5  # Normalize to [-1, 1]



def build_generator():
    model = tf.keras.Sequential([
        layers.Dense(7*7*256, use_bias=False, input_shape=(100,)),
        layers.BatchNormalization(),
        layers.LeakyReLU(),

        layers.Reshape((7, 7, 256)),
        layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),

        layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),

        layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
    ])
    return model



def build_discriminator():
    model = tf.keras.Sequential([
        layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]),
        layers.LeakyReLU(),
        layers.Dropout(0.3),

        layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'),
        layers.LeakyReLU(),
        layers.Dropout(0.3),

        layers.Flatten(),
        layers.Dense(1)
    ])
    return model



cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

generator = build_generator()
discriminator = build_discriminator()

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)


EPOCHS = 50
BATCH_SIZE = 256
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])

# Batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(60000).batch(BATCH_SIZE)




@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))




import os
from IPython import display

def generate_and_save_images(model, epoch, test_input):
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))

    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i+1)
        plt.imshow((predictions[i, :, :, 0] + 1) / 2, cmap='gray')
        plt.axis('off')

    plt.suptitle(f'Epoch {epoch}')
    plt.show()

def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)

        # Produce images after every epoch
        generate_and_save_images(generator, epoch + 1, seed)

train(train_dataset, EPOCHS)
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\core\dense.py:85: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[10], line 125
    122         # Produce images after every epoch
    123         generate_and_save_images(generator, epoch + 1, seed)
--> 125 train(train_dataset, EPOCHS)

Cell In[10], line 120, in train(dataset, epochs)
    118 for epoch in range(epochs):
    119     for image_batch in dataset:
--> 120         train_step(image_batch)
    122     # Produce images after every epoch
    123     generate_and_save_images(generator, epoch + 1, seed)

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\util\traceback_utils.py:150, in filter_traceback.<locals>.error_handler(*args, **kwargs)
    148 filtered_tb = None
    149 try:
--> 150   return fn(*args, **kwargs)
    151 except Exception as e:
    152   filtered_tb = _process_traceback_frames(e.__traceback__)

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\polymorphic_function\polymorphic_function.py:833, in Function.__call__(self, *args, **kwds)
    830 compiler = "xla" if self._jit_compile else "nonXla"
    832 with OptionalXlaContext(self._jit_compile):
--> 833   result = self._call(*args, **kwds)
    835 new_tracing_count = self.experimental_get_tracing_count()
    836 without_tracing = (tracing_count == new_tracing_count)

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\polymorphic_function\polymorphic_function.py:869, in Function._call(self, *args, **kwds)
    866   self._lock.release()
    867   # In this case we have created variables on the first call, so we run the
    868   # defunned version which is guaranteed to never create variables.
--> 869   return tracing_compilation.call_function(
    870       args, kwds, self._no_variable_creation_config
    871   )
    872 elif self._variable_creation_config is not None:
    873   # Release the lock early so that multiple threads can perform the call
    874   # in parallel.
    875   self._lock.release()

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\polymorphic_function\tracing_compilation.py:139, in call_function(args, kwargs, tracing_options)
    137 bound_args = function.function_type.bind(*args, **kwargs)
    138 flat_inputs = function.function_type.unpack_inputs(bound_args)
--> 139 return function._call_flat(  # pylint: disable=protected-access
    140     flat_inputs, captured_inputs=function.captured_inputs
    141 )

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\polymorphic_function\concrete_function.py:1322, in ConcreteFunction._call_flat(self, tensor_inputs, captured_inputs)
   1318 possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args)
   1319 if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE
   1320     and executing_eagerly):
   1321   # No tape is watching; skip to running the function.
-> 1322   return self._inference_function.call_preflattened(args)
   1323 forward_backward = self._select_forward_and_backward_functions(
   1324     args,
   1325     possible_gradient_type,
   1326     executing_eagerly)
   1327 forward_function, args_with_tangents = forward_backward.forward()

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\polymorphic_function\atomic_function.py:216, in AtomicFunction.call_preflattened(self, args)
    214 def call_preflattened(self, args: Sequence[core.Tensor]) -> Any:
    215   """Calls with flattened tensor inputs and returns the structured output."""
--> 216   flat_outputs = self.call_flat(*args)
    217   return self.function_type.pack_output(flat_outputs)

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\polymorphic_function\atomic_function.py:251, in AtomicFunction.call_flat(self, *args)
    249 with record.stop_recording():
    250   if self._bound_context.executing_eagerly():
--> 251     outputs = self._bound_context.call_function(
    252         self.name,
    253         list(args),
    254         len(self.function_type.flat_outputs),
    255     )
    256   else:
    257     outputs = make_call_op_in_graph(
    258         self,
    259         list(args),
    260         self._bound_context.function_call_options.as_attrs(),
    261     )

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\context.py:1500, in Context.call_function(self, name, tensor_inputs, num_outputs)
   1498 cancellation_context = cancellation.context()
   1499 if cancellation_context is None:
-> 1500   outputs = execute.execute(
   1501       name.decode("utf-8"),
   1502       num_outputs=num_outputs,
   1503       inputs=tensor_inputs,
   1504       attrs=attrs,
   1505       ctx=self,
   1506   )
   1507 else:
   1508   outputs = execute.execute_with_cancellation(
   1509       name.decode("utf-8"),
   1510       num_outputs=num_outputs,
   (...)
   1514       cancellation_manager=cancellation_context,
   1515   )

File ~\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\eager\execute.py:53, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     51 try:
     52   ctx.ensure_initialized()
---> 53   tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     54                                       inputs, attrs, num_outputs)
     55 except core._NotOkStatusException as e:
     56   if name is not None:

KeyboardInterrupt: 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
#OR
In [ ]:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, LeakyReLU, BatchNormalization
from tensorflow.keras.optimizers import Adam



(X_train, _), (_, _) = mnist.load_data()

# Normalize and reshape for CNN
X_train = X_train / 127.5 - 1.0  # Normalize to [-1, 1]
X_train = X_train.reshape(-1, 28, 28, 1)


def build_generator():
    model = Sequential()
    model.add(Dense(7*7*128, input_dim=100))
    model.add(LeakyReLU(0.2))
    model.add(Reshape((7, 7, 128)))

    model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))

    model.add(Conv2DTranspose(64, (4,4), strides=(2,2), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))

    model.add(Conv2D(1, (7,7), activation='tanh', padding='same'))
    return model



def build_discriminator():
    model = Sequential()
    model.add(Conv2D(64, (3,3), strides=(2,2), padding='same', input_shape=(28,28,1)))
    model.add(LeakyReLU(0.2))

    model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))
    model.add(LeakyReLU(0.2))
    
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    return model



def build_gan(generator, discriminator):
    discriminator.compile(optimizer=Adam(0.0002), loss='binary_crossentropy', metrics=['accuracy'])
    discriminator.trainable = False

    gan = Sequential([generator, discriminator])
    gan.compile(optimizer=Adam(0.0002), loss='binary_crossentropy')
    return gan



def train_dcgan(epochs=10000, batch_size=128, save_interval=1000):
    half_batch = batch_size // 2

    for epoch in range(epochs):
        # --- Train Discriminator ---
        idx = np.random.randint(0, X_train.shape[0], half_batch)
        real_imgs = X_train[idx]

        noise = np.random.normal(0, 1, (half_batch, 100))
        fake_imgs = generator.predict(noise)

        d_loss_real = discriminator.train_on_batch(real_imgs, np.ones((half_batch, 1)))
        d_loss_fake = discriminator.train_on_batch(fake_imgs, np.zeros((half_batch, 1)))
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # --- Train Generator ---
        noise = np.random.normal(0, 1, (batch_size, 100))
        g_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)))

        # Log
        if epoch % 100 == 0:
            print(f"{epoch} [D loss: {d_loss[0]:.4f}, acc.: {100*d_loss[1]:.2f}%] [G loss: {g_loss:.4f}]")

        # Save sample
        if epoch % save_interval == 0:
            save_generated_images(epoch)

# Generate image sample
def save_generated_images(epoch, examples=25):
    noise = np.random.normal(0, 1, (examples, 100))
    gen_imgs = generator.predict(noise)
    gen_imgs = 0.5 * gen_imgs + 0.5  # rescale to [0, 1]

    fig, axs = plt.subplots(5, 5)
    cnt = 0
    for i in range(5):
        for j in range(5):
            axs[i,j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            axs[i,j].axis('off')
            cnt += 1
    plt.suptitle(f"Generated Digits at Epoch {epoch}")
    plt.show()





generator = build_generator()
discriminator = build_discriminator()
gan = build_gan(generator, discriminator)

train_dcgan(epochs=5, batch_size=64, save_interval=1000)  #GIVE EPOCH HERE
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''Perform Sentiment Analysis in the network graph using RNN.
In [ ]:
 
In [1]:
pip install networkx 
Collecting networkx
  Downloading networkx-3.4.2-py3-none-any.whl.metadata (6.3 kB)
Downloading networkx-3.4.2-py3-none-any.whl (1.7 MB)
   ---------------------------------------- 0.0/1.7 MB ? eta -:--:--
   ---------------------------------------- 1.7/1.7 MB 23.2 MB/s eta 0:00:00
Installing collected packages: networkx
Successfully installed networkx-3.4.2
Note: you may need to restart the kernel to use updated packages.
In [ ]:
 
In [4]:
#create data
import networkx as nx
import matplotlib.pyplot as plt

# Sample graph with text data (nodes with comments or reviews)
G = nx.Graph()

# Add nodes with reviews (in reality, use actual social data or product comments)
G.add_node(1, review="I love this product, it's amazing!")
G.add_node(2, review="This is the worst thing I have ever bought.")
G.add_node(3, review="Meh, it's okay. Nothing special.")
G.add_node(4, review="Absolutely fantastic! Highly recommend.")
G.add_node(5, review="Terrible service and bad quality.")

# Add edges (connections can be user interactions, similarities, etc.)
G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5)])



#preprocessing
import nltk
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

nltk.download('punkt')

reviews = [G.nodes[n]['review'] for n in G.nodes]
labels = [1, 0, 1, 1, 0]  # 1: Positive, 0: Negative (for demonstration)

tokenizer = Tokenizer(num_words=1000, oov_token="<OOV>")
tokenizer.fit_on_texts(reviews)
sequences = tokenizer.texts_to_sequences(reviews)
padded = pad_sequences(sequences, maxlen=10, padding='post')




#building model 

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

model = Sequential([
    Embedding(input_dim=1000, output_dim=16),
    LSTM(32),
    Dense(1, activation='sigmoid')
])

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

import numpy as np

# Convert to NumPy arrays explicitly
padded = np.array(padded)
labels = np.array(labels)

model.fit(padded, labels, epochs=10,verbose =1)





#predictions

predictions = model.predict(padded)
sentiments = ['green' if p > 0.5 else 'red' for p in predictions]

# Draw the network with node color as sentiment
plt.figure(figsize=(8, 6))
nx.draw(G, with_labels=True, node_color=sentiments, node_size=1000, font_color='white')
plt.title("Network Graph with Sentiment Analysis (Green=Positive, Red=Negative)")
plt.show()
Epoch 1/10
[nltk_data] Downloading package punkt to
[nltk_data]     C:\Users\Lenovo\AppData\Roaming\nltk_data...
[nltk_data]   Package punkt is already up-to-date!
1/1 ━━━━━━━━━━━━━━━━━━━━ 3s 3s/step - accuracy: 0.2000 - loss: 0.6959
Epoch 2/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 45ms/step - accuracy: 0.6000 - loss: 0.6941
Epoch 3/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 43ms/step - accuracy: 0.6000 - loss: 0.6923
Epoch 4/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 52ms/step - accuracy: 0.6000 - loss: 0.6906
Epoch 5/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 58ms/step - accuracy: 0.6000 - loss: 0.6888
Epoch 6/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 49ms/step - accuracy: 0.6000 - loss: 0.6871
Epoch 7/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 47ms/step - accuracy: 0.6000 - loss: 0.6853
Epoch 8/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 45ms/step - accuracy: 0.6000 - loss: 0.6834
Epoch 9/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 40ms/step - accuracy: 0.6000 - loss: 0.6814
Epoch 10/10
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 84ms/step - accuracy: 0.6000 - loss: 0.6794
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 564ms/step
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
#OR 
In [ ]:
 
In [5]:
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Sample data
texts = ["I love this product", "This is terrible", "Absolutely great experience"]
labels = [1, 0, 1]  # Binary sentiment

# Tokenization
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
X = pad_sequences(sequences, maxlen=100)

# Convert labels to NumPy array
labels = np.array(labels)

# Model
model = Sequential([
    Embedding(input_dim=5000, output_dim=64),  # removed input_length warning
    LSTM(64),
    Dense(1, activation='sigmoid')
])

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, labels, epochs=5)



def predict_sentiment(text):
    seq = tokenizer.texts_to_sequences([text])
    padded = pad_sequences(seq, maxlen=100)
    prediction = model.predict(padded)[0][0]
    return 'positive' if prediction > 0.5 else 'negative'



import networkx as nx

G = nx.DiGraph()

# Add nodes with sentiment
nodes = {
    1: "I love this!",
    2: "This is bad",
    3: "Agreed!"
}
for node_id, text in nodes.items():
    sentiment = predict_sentiment(text)
    G.add_node(node_id, sentiment=sentiment)

# Add edges (e.g., reply or retweet)
G.add_edge(2, 1)  # user 2 replied to user 1
G.add_edge(3, 1)

# Visualize
import matplotlib.pyplot as plt

color_map = []
for node in G:
    color_map.append('green' if G.nodes[node]['sentiment'] == 'positive' else 'red')

nx.draw(G, with_labels=True, node_color=color_map)
plt.show()
Epoch 1/5
1/1 ━━━━━━━━━━━━━━━━━━━━ 2s 2s/step - accuracy: 0.6667 - loss: 0.6933
Epoch 2/5
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 66ms/step - accuracy: 0.6667 - loss: 0.6864
Epoch 3/5
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step - accuracy: 0.6667 - loss: 0.6796
Epoch 4/5
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 60ms/step - accuracy: 0.6667 - loss: 0.6727
Epoch 5/5
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 60ms/step - accuracy: 0.6667 - loss: 0.6655
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 141ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 27ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 26ms/step
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''2. Data Visualization from Extraction Transformation and Loading (ETL) Process
In [ ]:
 
In [6]:
#extract

import pandas as pd

# Simulate data extraction from CSV
df = pd.read_csv("https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv")
print(df.head())


#transform


# Example transformations:
df['total_bill'] = df['total_bill'].round(2)
df['tip_percent'] = round((df['tip'] / df['total_bill']) * 100, 2)

# Optional: categorize tip percentage
df['tip_level'] = pd.cut(df['tip_percent'],
                         bins=[0, 10, 20, 100],
                         labels=['Low', 'Medium', 'High'])



#load

# Save the transformed data locally (simulate loading)
df.to_csv("cleaned_tips.csv", index=False)



#visualisation 

import seaborn as sns
import matplotlib.pyplot as plt

# Total bill vs Tip
sns.scatterplot(data=df, x="total_bill", y="tip", hue="tip_level")
plt.title("Total Bill vs Tip Colored by Tip Level")
plt.show()

# Tip percentage by day
sns.boxplot(x="day", y="tip_percent", data=df)
plt.title("Tip % Distribution by Day")
plt.show()

# Count of tips by tip level
sns.countplot(x="tip_level", data=df)
plt.title("Count of Tip Levels")
plt.show()
   total_bill   tip     sex smoker  day    time  size
0       16.99  1.01  Female     No  Sun  Dinner     2
1       10.34  1.66    Male     No  Sun  Dinner     3
2       21.01  3.50    Male     No  Sun  Dinner     3
3       23.68  3.31    Male     No  Sun  Dinner     2
4       24.59  3.61  Female     No  Sun  Dinner     4
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
#OR 
In [ ]:
 
In [7]:
#extract

import seaborn as sns
import pandas as pd

# Load the Titanic dataset (inbuilt in seaborn)
df = sns.load_dataset('titanic')

# View the first few rows
print(df.head())



#transform

# Drop rows with missing 'age' or 'embarked' values
df = df.dropna(subset=['age', 'embarked'])

# Create a new column for age group
df['age_group'] = pd.cut(df['age'], bins=[0, 12, 18, 35, 60, 100],
                         labels=['Child', 'Teen', 'Young Adult', 'Adult', 'Senior'])

# Map class codes to readable labels (optional)
df['class'] = df['class'].map({'First': '1st', 'Second': '2nd', 'Third': '3rd'})



#load

# You could store to CSV or continue using in memory
df.to_csv("cleaned_titanic.csv", index=False)




#visualise

import matplotlib.pyplot as plt

# Survival count
sns.countplot(x='survived', data=df)
plt.title("Survival Count (0 = Not Survived, 1 = Survived)")
plt.show()

# Survival by class
sns.countplot(x='class', hue='survived', data=df)
plt.title("Survival by Passenger Class")
plt.show()

# Age distribution by survival
sns.boxplot(x='survived', y='age', data=df)
plt.title("Age Distribution by Survival")
plt.show()

# Survival by age group
sns.countplot(x='age_group', hue='survived', data=df)
plt.title("Survival by Age Group")
plt.show()
   survived  pclass     sex   age  sibsp  parch     fare embarked  class  \
0         0       3    male  22.0      1      0   7.2500        S  Third   
1         1       1  female  38.0      1      0  71.2833        C  First   
2         1       3  female  26.0      0      0   7.9250        S  Third   
3         1       1  female  35.0      1      0  53.1000        S  First   
4         0       3    male  35.0      0      0   8.0500        S  Third   

     who  adult_male deck  embark_town alive  alone  
0    man        True  NaN  Southampton    no  False  
1  woman       False    C    Cherbourg   yes  False  
2  woman       False  NaN  Southampton   yes   True  
3  woman       False    C  Southampton   yes  False  
4    man        True  NaN  Southampton    no   True  
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
''' Perform the data classification algorithm using any Classification algorithm
In [ ]:
 
In [8]:
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns

# Load the Iris dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target



# Split dataset into 80% train and 20% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)




# Create SVM classifier with linear kernel
svm_model = SVC(kernel='linear')
svm_model.fit(X_train, y_train)



# Make predictions on test set
y_pred = svm_model.predict(X_test)

# Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, cmap='Blues', xticklabels=iris.target_names, yticklabels=iris.target_names)
plt.title("Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()

# Classification Report
print("Classification Report:\n", classification_report(y_test, y_pred, target_names=iris.target_names))
No description has been provided for this image
Classification Report:
               precision    recall  f1-score   support

      setosa       1.00      1.00      1.00        10
  versicolor       1.00      1.00      1.00         9
   virginica       1.00      1.00      1.00        11

    accuracy                           1.00        30
   macro avg       1.00      1.00      1.00        30
weighted avg       1.00      1.00      1.00        30

In [ ]:
 
In [ ]:
 
In [ ]:
#OR
In [ ]:
 
In [9]:
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix



iris = datasets.load_iris()
X = iris.data  # Features: sepal length, sepal width, petal length, petal width
y = iris.target  # Target: species (0=Setosa, 1=Versicolor, 2=Virginica)


X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)


scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)


model = LogisticRegression(max_iter=200)


model.fit(X_train, y_train)



y_pred = model.predict(X_test)



print("Accuracy:", accuracy_score(y_test, y_pred))
print("\nClassification Report:\n", classification_report(y_test, y_pred))
print("\nConfusion Matrix:\n", confusion_matrix(y_test, y_pred))
Accuracy: 1.0

Classification Report:
               precision    recall  f1-score   support

           0       1.00      1.00      1.00        10
           1       1.00      1.00      1.00         9
           2       1.00      1.00      1.00        11

    accuracy                           1.00        30
   macro avg       1.00      1.00      1.00        30
weighted avg       1.00      1.00      1.00        30


Confusion Matrix:
 [[10  0  0]
 [ 0  9  0]
 [ 0  0 11]]
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''Perform the data clustering algorithm using any Clustering algorithm
In [ ]:
 
In [10]:
from sklearn import datasets
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.decomposition import PCA



# Load iris dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target  # actual labels (only for evaluation)

# Convert to DataFrame for better visualization
df = pd.DataFrame(X, columns=iris.feature_names)




# Apply KMeans clustering
kmeans = KMeans(n_clusters=3, random_state=42)
kmeans.fit(X)

# Add cluster labels to the DataFrame
df['Cluster'] = kmeans.labels_


#Visualize Clusters using PCA

# Reduce dimensions to 2D using PCA for visualization
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)

# Plot the clusters
plt.figure(figsize=(8,6))
sns.scatterplot(x=X_pca[:, 0], y=X_pca[:, 1], hue=kmeans.labels_, palette='Set1')
plt.title('K-Means Clustering on Iris Dataset (PCA Reduced)')
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')
plt.legend(title='Cluster')
plt.show()


#compare with actual labels

# Visualize actual vs. predicted labels
plt.figure(figsize=(8,6))
sns.scatterplot(x=X_pca[:, 0], y=X_pca[:, 1], hue=y, palette='Set2')
plt.title('Actual Species Distribution (PCA Reduced)')
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')
plt.legend(title='Species')
plt.show()
No description has been provided for this image
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
#OR 
In [ ]:
 
In [13]:
import pandas as pd
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns


iris = datasets.load_iris()
X = iris.data  # Features: sepal length, sepal width, petal length, petal width


scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)



kmeans = KMeans(n_clusters=3, random_state=42)
kmeans.fit(X_scaled)


labels = kmeans.labels_
centers = kmeans.cluster_centers_

plt.figure(figsize=(8, 6))
plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', edgecolor='k', s=100)


plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='X', s=200, label='Centroids')


plt.title('K-Means Clustering (Iris Dataset)')
plt.xlabel('Sepal Length (scaled)')
plt.ylabel('Sepal Width (scaled)')
plt.legend()
plt.show()


plt.figure(figsize=(8, 6))
plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', edgecolor='k', s=100)


plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='X', s=200, label='Centroids')



from sklearn.metrics import adjusted_rand_score


true_labels = iris.target


ari_score = adjusted_rand_score(true_labels, labels)
print(f"Adjusted Rand Index (ARI) score: {ari_score:.4f}")
No description has been provided for this image
Adjusted Rand Index (ARI) score: 0.4328
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
''' Import Data from different Sources such as (Excel, Sql Server, Oracle etc.) and load in
 targeted(power Bi)
In [ ]:
 
In [ ]:
To import data from different sources such as Excel, SQL Server, Oracle, etc., and load it into a target destination (like Power BI, Excel, or a database), follow this guide depending on the tools you're using.

✅ Using Power BI (Most Common Tool for ETL + Visualization)
📥 1. Import Data from Excel:
Open Power BI Desktop.

Click on Home > Get Data > Excel.

Browse and select the Excel file.

Select the sheet(s) you want.

Click Load (or Transform to clean before loading).

🛢️ 2. Import Data from SQL Server:
Click on Home > Get Data > SQL Server.

Enter:

Server name (e.g., localhost\SQLEXPRESS)

Optional: Database name

Choose Import or DirectQuery.

Sign in if needed and select tables/views to load.

🟠 3. Import Data from Oracle:
Go to Home > Get Data > More... > Database > Oracle Database.

Enter:

Server name (TNS or connection string)

You may need to install the Oracle client software on your system.

Authenticate and select the data to load.

🎯 Target: Load and Visualize the Data
Once imported, you can:

Clean/transform data using Power Query Editor.

Load data into Power BI's data model.

Create visualizations, dashboards, and reports using charts, tables, slicers, etc.

✅ Alternative: Load to Excel or SQL
If you're working in Excel (Power Query):

Use Data > Get Data from various sources.

Load into a new sheet or data model.

Use PivotTables, charts, or Power Pivot.

If you're targeting SQL Server:

Use SSIS (SQL Server Integration Services) for advanced ETL pipelines.
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
''' Perform the Extraction Transformation and Loading (ETL) process to construct the database
 in the PowerBI
In [ ]:
Perform ETL (Extract, Transform, Load) and create a dashboard using Power BI.

🔹 STEP 1: EXTRACTION (Data Loading)
Open Power BI Desktop

Click on Home > Get Data > Excel

Choose this built-in dataset:
📂 Location:
C:\Users\<YourUsername>\Documents\Power BI Desktop\Sample Files\Financial Sample.xlsx
(or download from: Financial Sample Excel - Microsoft)

Select the Financials sheet and click Load

🔹 STEP 2: TRANSFORMATION (Data Cleaning in Power Query Editor)
Click on Transform Data to open Power Query Editor

Clean your data:

Remove any unnecessary columns (e.g., Segment)

Fix column types (ensure Sales, Profit, etc., are Decimal Number)

Rename confusing column headers (e.g., Units Sold → Units)

Filter out any blank/null rows if present

Click Close & Apply to return to Power BI

🔹 STEP 3: LOADING (Data Model)
Data is now loaded into Power BI Model

You can check Model View to see relationships if working with multiple tables

🔹 STEP 4: DATA VISUALIZATION (Dashboards in Power BI)
On the Report tab, start adding visualizations:

📊 Recommended Charts:
Visual	Fields Used
Bar Chart	Region vs. Total Sales
Pie Chart	Product Category vs. Profit
Line Chart	Month vs. Sales
Table	Country, Sales, Profit, Units
KPI	Total Sales, Total Profit

🔧 Add Filters:
Use Slicers for Region, Product, or Year

Create a Date Hierarchy for Time-Series

✅ Optional Enhancements:
Add a Page Title

Format visuals (color, label, axis)

Save report and publish to Power BI Service
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''Perform Data Analysis and Visualization using Advanced Excel.
In [ ]:
Here's a simple yet effective guide to Data Analysis and Visualization using Advanced Excel, including the steps and features you can use, along with downloadable resources and sample ideas.

✅ 1. Sample Dataset to Use
Use a built-in dataset or download one such as:

Microsoft Excel Sample Datasets

Or use Kaggle datasets: https://www.kaggle.com/datasets

✅ 2. Advanced Excel Features for Data Analysis
📌 a. Power Query (ETL in Excel)
Go to Data > Get & Transform (Power Query).

Load data from various sources (CSV, web, Excel, database).

Clean, reshape, merge, and transform your dataset.

📌 b. Pivot Tables & Pivot Charts
Insert PivotTable: Insert > PivotTable.

Drag fields to Rows, Columns, and Values to summarize data.

Create a PivotChart for visual representation.

📌 c. Advanced Formulas
LOOKUPs: VLOOKUP, HLOOKUP, XLOOKUP

Conditional Functions: IF, IFS, SWITCH

Aggregation: SUMIF, COUNTIF, AVERAGEIF

Text & Date Functions: CONCAT, TEXTJOIN, YEAR, MONTH

📌 d. Data Validation
Set rules for entering data (drop-downs, ranges).

Data > Data Validation

📌 e. Conditional Formatting
Highlight data using color scales, icon sets, or custom formulas.

Useful for spotting trends and outliers.

✅ 3. Visualization with Excel Charts
Use Insert > Charts:

Column/Bar Charts: Compare categories

Line Charts: Trend over time

Pie Charts: Part-to-whole

Combo Charts: Two variables together

Slicers and Timelines: Interactivity with PivotTables

✅ 4. Dashboard Creation
Combine:

Pivot Tables

Slicers

Charts

Conditional Formatting

To build an interactive dashboard on a single sheet.
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''----------------------------------------------------------------------------------------------------------------------------------------------------
In [ ]:
 
In [ ]:
 
In [ ]:
'''A real estate company wants to predict house prices based on multiple features such as
area income, house age, number of rooms, and population. Develop a Linear
Regression model that can accurately estimate the price of a house using these
features.
In [ ]:
 
In [1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics

# Simulate a real estate dataset using make_regression
X, y = make_regression(n_samples=1000, n_features=4, noise=10, random_state=42)

# Create a DataFrame to mimic house features
columns = ['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms', 'Area Population']
df = pd.DataFrame(X, columns=columns)
df['Price'] = y

# Explore the data
print(df.head())
print(df.describe())
print(df.corr())

# Visualize feature correlation
sns.heatmap(df.corr(), annot=True, cmap='coolwarm')
plt.title("Feature Correlation")
plt.show()

# Features and Target
X = df.drop('Price', axis=1)
y = df['Price']

# Train/Test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build Linear Regression model
model = LinearRegression()
model.fit(X_train, y_train)

# Coefficients and Intercept
print("Intercept:", model.intercept_)
coeff_df = pd.DataFrame(model.coef_, X.columns, columns=["Coefficient"])
print(coeff_df)

# Predictions
predictions = model.predict(X_test)

# Evaluate the model
print("MAE:", metrics.mean_absolute_error(y_test, predictions))
print("MSE:", metrics.mean_squared_error(y_test, predictions))
print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, predictions)))

# Visualize Predictions vs Actual
plt.scatter(y_test, predictions)
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title("Actual vs Predicted Prices")
plt.show()

# Distribution of errors
sns.histplot(y_test - predictions, kde=True, bins=30)
plt.title("Distribution of Prediction Errors")
plt.show()
   Avg. Area Income  Avg. Area House Age  Avg. Area Number of Rooms  \
0         -0.359292             1.846707                   0.583928   
1          0.369642            -0.333819                   1.173125   
2          1.237816             1.353872                  -0.114540   
3         -0.295401            -0.353166                   0.338484   
4          1.561511             0.299293                   1.301741   

   Area Population       Price  
0         0.681891   92.216482  
1         0.696954  174.721231  
2        -1.713135   -9.684095  
3         0.194384    4.530370  
4         0.742095  225.664028  
       Avg. Area Income  Avg. Area House Age  Avg. Area Number of Rooms  \
count       1000.000000          1000.000000                1000.000000   
mean           0.030086             0.024828                  -0.008255   
std            1.006964             1.011884                   1.006075   
min           -2.991136            -2.896255                  -3.241267   
25%           -0.670871            -0.677037                  -0.675299   
50%            0.021158             0.020210                  -0.007509   
75%            0.695878             0.693881                   0.642282   
max            3.926238             3.852731                   3.152057   

       Area Population        Price  
count      1000.000000  1000.000000  
mean          0.030624     1.934338  
std           0.963919   120.217187  
min          -3.019512  -403.191385  
25%          -0.612942   -81.489928  
50%           0.056187     3.977086  
75%           0.664881    85.366897  
max           3.243093   390.102673  
                           Avg. Area Income  Avg. Area House Age  \
Avg. Area Income                   1.000000             0.004602   
Avg. Area House Age                0.004602             1.000000   
Avg. Area Number of Rooms          0.033745            -0.032355   
Area Population                   -0.022976             0.008146   
Price                              0.450513             0.049799   

                           Avg. Area Number of Rooms  Area Population  \
Avg. Area Income                            0.033745        -0.022976   
Avg. Area House Age                        -0.032355         0.008146   
Avg. Area Number of Rooms                   1.000000        -0.038208   
Area Population                            -0.038208         1.000000   
Price                                       0.811138         0.347963   

                              Price  
Avg. Area Income           0.450513  
Avg. Area House Age        0.049799  
Avg. Area Number of Rooms  0.811138  
Area Population            0.347963  
Price                      1.000000  
No description has been provided for this image
Intercept: -0.6907061401783747
                           Coefficient
Avg. Area Income             51.611591
Avg. Area House Age           8.448170
Avg. Area Number of Rooms    97.291581
Area Population              48.348308
MAE: 8.214676975999433
MSE: 106.37666516565143
RMSE: 10.313906396979345
No description has been provided for this image
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
#OR
In [2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics

# Load the California Housing dataset
housing = fetch_california_housing(as_frame=True)
df = housing.frame

# View the first few rows
print(df.head())

# Optional: Check for missing values
print(df.isnull().sum())

# Visualize correlations
plt.figure(figsize=(10, 8))
sns.heatmap(df.corr(), annot=True, cmap='coolwarm')
plt.title("Feature Correlation Heatmap")
plt.show()

# Features and target
X = df.drop("MedHouseVal", axis=1)  # Independent variables
y = df["MedHouseVal"]               # Target variable (Median House Value)

# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build and train Linear Regression model
model = LinearRegression()
model.fit(X_train, y_train)

# Model coefficients
print("Intercept:", model.intercept_)
coeff_df = pd.DataFrame(model.coef_, X.columns, columns=["Coefficient"])
print(coeff_df)

# Predict on test data
predictions = model.predict(X_test)

# Evaluate model
print("MAE:", metrics.mean_absolute_error(y_test, predictions))
print("MSE:", metrics.mean_squared_error(y_test, predictions))
print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, predictions)))

# Plot predicted vs actual
plt.scatter(y_test, predictions)
plt.xlabel("Actual House Value")
plt.ylabel("Predicted House Value")
plt.title("Actual vs Predicted House Values")
plt.show()

# Plot residuals
sns.histplot(y_test - predictions, kde=True, bins=30)
plt.title("Distribution of Prediction Errors")
plt.show()
   MedInc  HouseAge  AveRooms  AveBedrms  Population  AveOccup  Latitude  \
0  8.3252      41.0  6.984127   1.023810       322.0  2.555556     37.88   
1  8.3014      21.0  6.238137   0.971880      2401.0  2.109842     37.86   
2  7.2574      52.0  8.288136   1.073446       496.0  2.802260     37.85   
3  5.6431      52.0  5.817352   1.073059       558.0  2.547945     37.85   
4  3.8462      52.0  6.281853   1.081081       565.0  2.181467     37.85   

   Longitude  MedHouseVal  
0    -122.23        4.526  
1    -122.22        3.585  
2    -122.24        3.521  
3    -122.25        3.413  
4    -122.25        3.422  
MedInc         0
HouseAge       0
AveRooms       0
AveBedrms      0
Population     0
AveOccup       0
Latitude       0
Longitude      0
MedHouseVal    0
dtype: int64
No description has been provided for this image
Intercept: -37.023277706064064
            Coefficient
MedInc         0.448675
HouseAge       0.009724
AveRooms      -0.123323
AveBedrms      0.783145
Population    -0.000002
AveOccup      -0.003526
Latitude      -0.419792
Longitude     -0.433708
MAE: 0.5332001304956558
MSE: 0.555891598695244
RMSE: 0.7455813830127761
No description has been provided for this image
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''To better target their marketing strategies, a real estate agency wants to classify houses
into price categories: Low, Medium, and High. Build a Convolutional Neural Network
(CNN) to classify houses based on the features provided in the dataset.
In [ ]:
 
In [3]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.utils import to_categorical

# 1. Load dataset
data = fetch_california_housing(as_frame=True)
df = data.frame

# 2. Create target categories: Low, Medium, High (based on percentiles)
df['PriceCategory'] = pd.qcut(df['MedHouseVal'], q=3, labels=['Low', 'Medium', 'High'])

# 3. Prepare features and target
X = df.drop(['MedHouseVal', 'PriceCategory'], axis=1)
y = df['PriceCategory']

# 4. Normalize features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# 5. Reshape input for CNN: (samples, height, width, channels)
# We'll reshape 8 features into a 2x4 "image" with 1 channel
X_reshaped = X_scaled.reshape(-1, 2, 4, 1)

# Encode target labels
encoder = LabelEncoder()
y_encoded = encoder.fit_transform(y)
y_categorical = to_categorical(y_encoded)

# 6. Split dataset
X_train, X_test, y_train, y_test = train_test_split(X_reshaped, y_categorical, test_size=0.2, random_state=42)

# 7. Build CNN model
model = Sequential()
model.add(Conv2D(32, (2, 2), activation='relu', input_shape=(2, 4, 1)))
model.add(MaxPooling2D(pool_size=(1, 1)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(3, activation='softmax'))  # 3 classes

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# 8. Train the model
history = model.fit(X_train, y_train, epochs=15, batch_size=32, validation_data=(X_test, y_test))

# 9. Evaluate
loss, accuracy = model.evaluate(X_test, y_test)
print(f"Test Accuracy: {accuracy:.2f}")

# Plot training history
plt.plot(history.history['accuracy'], label='Train')
plt.plot(history.history['val_accuracy'], label='Validation')
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
Epoch 1/15
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - accuracy: 0.6048 - loss: 0.8618 - val_accuracy: 0.7045 - val_loss: 0.6491
Epoch 2/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7076 - loss: 0.6537 - val_accuracy: 0.7149 - val_loss: 0.6209
Epoch 3/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7262 - loss: 0.6231 - val_accuracy: 0.7267 - val_loss: 0.6169
Epoch 4/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7315 - loss: 0.6181 - val_accuracy: 0.7430 - val_loss: 0.5882
Epoch 5/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7287 - loss: 0.6076 - val_accuracy: 0.7452 - val_loss: 0.5772
Epoch 6/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7479 - loss: 0.5822 - val_accuracy: 0.7483 - val_loss: 0.5699
Epoch 7/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7513 - loss: 0.5764 - val_accuracy: 0.7590 - val_loss: 0.5637
Epoch 8/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7512 - loss: 0.5640 - val_accuracy: 0.7498 - val_loss: 0.5680
Epoch 9/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7508 - loss: 0.5781 - val_accuracy: 0.7657 - val_loss: 0.5487
Epoch 10/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7638 - loss: 0.5506 - val_accuracy: 0.7616 - val_loss: 0.5516
Epoch 11/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7635 - loss: 0.5445 - val_accuracy: 0.7590 - val_loss: 0.5491
Epoch 12/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7569 - loss: 0.5571 - val_accuracy: 0.7665 - val_loss: 0.5410
Epoch 13/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7669 - loss: 0.5390 - val_accuracy: 0.7735 - val_loss: 0.5333
Epoch 14/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7643 - loss: 0.5439 - val_accuracy: 0.7660 - val_loss: 0.5395
Epoch 15/15
516/516 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7646 - loss: 0.5304 - val_accuracy: 0.7791 - val_loss: 0.5260
129/129 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.7781 - loss: 0.5284
Test Accuracy: 0.78
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
'''A financial analyst wants to model house price trends over increasing area numbers of
rooms. Use an LSTM-based Recurrent Neural Network to predict the next house price
value based on historical patterns in sorted data.
In [ ]:
 
In [4]:
'''To solve this time-series-like prediction problem using LSTM (Long Short-Term Memory) neural networks, we'll simulate a sequence-based format using an inbuilt dataset — fetch_california_housing from sklearn.

We'll use:

Area (AveRooms) and AveOccup (number of rooms and average occupancy),

sorted by AveRooms, to simulate house price trends,

and predict MedHouseVal (house price) using past sequences.

✅ Step-by-Step Plan:
Load the dataset (California housing).

Sort the data by AveRooms (as per the scenario).

Create sequences for LSTM.

Build and train the LSTM model.

Predict and evaluate.'''



import numpy as np
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
import matplotlib.pyplot as plt

# 1. Load the California Housing dataset
data = fetch_california_housing(as_frame=True)
df = data.frame

# 2. Select relevant features and sort by AveRooms
df_sorted = df.sort_values(by='AveRooms')
features = ['AveRooms', 'AveOccup']
target = 'MedHouseVal'

# 3. Normalize data
scaler_x = MinMaxScaler()
scaler_y = MinMaxScaler()
X_scaled = scaler_x.fit_transform(df_sorted[features])
y_scaled = scaler_y.fit_transform(df_sorted[[target]])

# 4. Create sequences for LSTM
def create_sequences(X, y, seq_length=10):
    X_seq, y_seq = [], []
    for i in range(len(X) - seq_length):
        X_seq.append(X[i:i+seq_length])
        y_seq.append(y[i+seq_length])
    return np.array(X_seq), np.array(y_seq)

sequence_length = 10
X_seq, y_seq = create_sequences(X_scaled, y_scaled, sequence_length)

# 5. Train-test split
X_train, X_test, y_train, y_test = train_test_split(X_seq, y_seq, test_size=0.2, random_state=42)

# 6. Build LSTM model
model = Sequential()
model.add(LSTM(64, activation='relu', input_shape=(sequence_length, len(features))))
model.add(Dense(1))

model.compile(optimizer='adam', loss='mse')
model.summary()

# 7. Train the model
history = model.fit(X_train, y_train, epochs=20, batch_size=32, validation_data=(X_test, y_test))

# 8. Evaluate the model
predictions = model.predict(X_test)
predicted_prices = scaler_y.inverse_transform(predictions)
actual_prices = scaler_y.inverse_transform(y_test)

# 9. Plot results
plt.figure(figsize=(10,6))
plt.plot(actual_prices, label='Actual Prices')
plt.plot(predicted_prices, label='Predicted Prices')
plt.title('House Price Prediction using LSTM')
plt.xlabel('Sample')
plt.ylabel('Price')
plt.legend()
plt.show()
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\rnn\rnn.py:205: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(**kwargs)
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                         ┃ Output Shape                ┃         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ lstm (LSTM)                          │ (None, 64)                  │          17,152 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_2 (Dense)                      │ (None, 1)                   │              65 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
 Total params: 17,217 (67.25 KB)
 Trainable params: 17,217 (67.25 KB)
 Non-trainable params: 0 (0.00 B)
Epoch 1/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - loss: 0.0768 - val_loss: 0.0538
Epoch 2/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0551 - val_loss: 0.0509
Epoch 3/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0519 - val_loss: 0.0489
Epoch 4/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0517 - val_loss: 0.0482
Epoch 5/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0491 - val_loss: 0.0495
Epoch 6/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0497 - val_loss: 0.0466
Epoch 7/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0486 - val_loss: 0.0478
Epoch 8/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0487 - val_loss: 0.0489
Epoch 9/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0488 - val_loss: 0.0466
Epoch 10/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0493 - val_loss: 0.0479
Epoch 11/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0475 - val_loss: 0.0479
Epoch 12/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0490 - val_loss: 0.0466
Epoch 13/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0491 - val_loss: 0.0493
Epoch 14/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0490 - val_loss: 0.0471
Epoch 15/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0491 - val_loss: 0.0467
Epoch 16/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0488 - val_loss: 0.0464
Epoch 17/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0480 - val_loss: 0.0471
Epoch 18/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0490 - val_loss: 0.0470
Epoch 19/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0487 - val_loss: 0.0474
Epoch 20/20
516/516 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0489 - val_loss: 0.0466
129/129 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
'''To streamline the approval of housing loans, a bank wants to classify houses into either
"High-value" or "Low-value" categories. Implement a CNN model and optimize
hyperparameters such as the number of layers, filter size, dropout, and learning rate to
improve classification accuracy.
In [6]:
'''To solve this binary classification problem using CNN, we can simulate house value classification with an inbuilt dataset like California Housing from sklearn.datasets. Although it's not image data, we can reshape the features to mimic image-like input for CNN purposes (a known approach in tabular-CNN research).

🔍 Problem Setup
Dataset: fetch_california_housing (inbuilt)

Target: Create a binary label:

"High-value": if MedHouseVal > median

"Low-value": otherwise

Approach: Use CNN on reshaped feature vectors.'''


import numpy as np
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

# 1. Load dataset
data = fetch_california_housing()
X = data.data
y = data.target

# 2. Create binary classification label
median_value = np.median(y)
y_binary = (y > median_value).astype(int)  # High-value = 1, Low-value = 0

# 3. Normalize features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# 4. Reshape X for CNN: reshape to (samples, 4, 2, 1)
X_reshaped = X_scaled.reshape(-1, 4, 2, 1)  # 8 features = 4x2 "image"

# 5. Split data
X_train, X_test, y_train, y_test = train_test_split(X_reshaped, y_binary, test_size=0.2, random_state=42)

# 6. Build CNN model
model = Sequential()
model.add(Conv2D(16, (2, 2), activation='relu', input_shape=(4, 2, 1)))
model.add(Dropout(0.3))

model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
# 7. Compile model with tuned learning rate
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

# 8. Train model
history = model.fit(X_train, y_train, epochs=20, batch_size=32, validation_split=0.2)

# 9. Evaluate model
y_pred = model.predict(X_test)
y_pred_classes = (y_pred > 0.5).astype(int)

# 10. Confusion Matrix
cm = confusion_matrix(y_test, y_pred_classes)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=["Low", "High"], yticklabels=["Low", "High"])
plt.title("Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()

# Classification report
print(classification_report(y_test, y_pred_classes, target_names=["Low-value", "High-value"]))
Epoch 1/20
C:\Users\Lenovo\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - accuracy: 0.6911 - loss: 0.5974 - val_accuracy: 0.7993 - val_loss: 0.4153
Epoch 2/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7904 - loss: 0.4441 - val_accuracy: 0.8247 - val_loss: 0.3833
Epoch 3/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8114 - loss: 0.4191 - val_accuracy: 0.8365 - val_loss: 0.3723
Epoch 4/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8133 - loss: 0.4080 - val_accuracy: 0.8332 - val_loss: 0.3699
Epoch 5/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8264 - loss: 0.3944 - val_accuracy: 0.8420 - val_loss: 0.3640
Epoch 6/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8293 - loss: 0.3880 - val_accuracy: 0.8401 - val_loss: 0.3599
Epoch 7/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8246 - loss: 0.3900 - val_accuracy: 0.8426 - val_loss: 0.3581
Epoch 8/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8295 - loss: 0.3805 - val_accuracy: 0.8417 - val_loss: 0.3594
Epoch 9/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8342 - loss: 0.3702 - val_accuracy: 0.8450 - val_loss: 0.3527
Epoch 10/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8381 - loss: 0.3691 - val_accuracy: 0.8462 - val_loss: 0.3520
Epoch 11/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8374 - loss: 0.3772 - val_accuracy: 0.8429 - val_loss: 0.3487
Epoch 12/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8290 - loss: 0.3784 - val_accuracy: 0.8486 - val_loss: 0.3471
Epoch 13/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8349 - loss: 0.3719 - val_accuracy: 0.8450 - val_loss: 0.3502
Epoch 14/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8372 - loss: 0.3707 - val_accuracy: 0.8477 - val_loss: 0.3455
Epoch 15/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8441 - loss: 0.3617 - val_accuracy: 0.8513 - val_loss: 0.3430
Epoch 16/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8385 - loss: 0.3632 - val_accuracy: 0.8462 - val_loss: 0.3425
Epoch 17/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8405 - loss: 0.3656 - val_accuracy: 0.8477 - val_loss: 0.3409
Epoch 18/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8440 - loss: 0.3582 - val_accuracy: 0.8453 - val_loss: 0.3387
Epoch 19/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8430 - loss: 0.3576 - val_accuracy: 0.8468 - val_loss: 0.3420
Epoch 20/20
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8472 - loss: 0.3557 - val_accuracy: 0.8474 - val_loss: 0.3379
129/129 ━━━━━━━━━━━━━━━━━━━━ 0s 997us/step
No description has been provided for this image
              precision    recall  f1-score   support

   Low-value       0.86      0.84      0.85      2077
  High-value       0.84      0.86      0.85      2051

    accuracy                           0.85      4128
   macro avg       0.85      0.85      0.85      4128
weighted avg       0.85      0.85      0.85      4128

In [ ]:
 
In [ ]:
 
In [ ]:
'''A property review site wants to predict user sentiment (positive or negative) based on
their written reviews. Build a sentiment classification model using an RNN on simulated
review text data.
In [ ]:
 
In [8]:
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Load dataset (top 10,000 words)
num_words = 10000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)

# Pad sequences to same length
max_len = 200
x_train = pad_sequences(x_train, maxlen=max_len)
x_test = pad_sequences(x_test, maxlen=max_len)

# Build RNN model
model = Sequential()
model.add(Embedding(input_dim=num_words, output_dim=64))
model.add(LSTM(64, return_sequences=False))
model.add(Dense(1, activation='sigmoid'))

# Compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train
model.fit(x_train, y_train, epochs=3, batch_size=64, validation_split=0.2)

# Evaluate
loss, acc = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {acc:.4f}")
Epoch 1/3
313/313 ━━━━━━━━━━━━━━━━━━━━ 33s 99ms/step - accuracy: 0.6321 - loss: 0.6120 - val_accuracy: 0.8268 - val_loss: 0.3937
Epoch 2/3
313/313 ━━━━━━━━━━━━━━━━━━━━ 29s 92ms/step - accuracy: 0.8874 - loss: 0.2846 - val_accuracy: 0.8618 - val_loss: 0.3237
Epoch 3/3
313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.9251 - loss: 0.2017 - val_accuracy: 0.8686 - val_loss: 0.3285
782/782 ━━━━━━━━━━━━━━━━━━━━ 17s 21ms/step - accuracy: 0.8652 - loss: 0.3439
Test Accuracy: 0.8666
In [ ]:
 
In [ ]:
#OR 
In [9]:
import pandas as pd

# Sample property reviews (text + sentiment)
data = {
    'review': [
        "The house was spacious and well-lit. Loved it!",
        "Terrible neighborhood, never buying here again.",
        "Affordable price with a beautiful garden.",
        "Too small for a family and overpriced.",
        "Amazing view and well connected to city.",
        "Old construction and leaky roof.",
        "Great location and peaceful surroundings.",
        "Noisy area, couldn’t sleep at night.",
        "Perfect for a small family, highly recommend.",
        "Water issues and bad maintenance."
    ],
    'sentiment': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]  # 1 = Positive, 0 = Negative
}

df = pd.DataFrame(data)


from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Tokenize text
tokenizer = Tokenizer(num_words=1000, oov_token="<OOV>")
tokenizer.fit_on_texts(df['review'])

sequences = tokenizer.texts_to_sequences(df['review'])
padded = pad_sequences(sequences, padding='post', maxlen=20)

X = padded
y = df['sentiment'].values



from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

model = Sequential([
    Embedding(input_dim=1000, output_dim=16),
    LSTM(32),
    Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()



model.fit(X, y, epochs=10, batch_size=2, verbose=1)



# Try prediction on new review
test_review = ["The apartment was dirty and badly managed."]
seq = tokenizer.texts_to_sequences(test_review)
padded_seq = pad_sequences(seq, maxlen=20, padding='post')
prediction = model.predict(padded_seq)

print("Sentiment:", "Positive" if prediction[0][0] > 0.5 else "Negative")
Model: "sequential_6"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                         ┃ Output Shape                ┃         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ embedding_2 (Embedding)              │ ?                           │     0 (unbuilt) │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ lstm_2 (LSTM)                        │ ?                           │     0 (unbuilt) │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_8 (Dense)                      │ ?                           │     0 (unbuilt) │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
 Total params: 0 (0.00 B)
 Trainable params: 0 (0.00 B)
 Non-trainable params: 0 (0.00 B)
Epoch 1/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - accuracy: 0.3639 - loss: 0.6939
Epoch 2/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - accuracy: 0.3764 - loss: 0.6940 
Epoch 3/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - accuracy: 0.5903 - loss: 0.6934 
Epoch 4/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - accuracy: 0.4306 - loss: 0.6939 
Epoch 5/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - accuracy: 0.5000 - loss: 0.6927 
Epoch 6/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - accuracy: 0.6528 - loss: 0.6926 
Epoch 7/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - accuracy: 0.4097 - loss: 0.6932 
Epoch 8/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - accuracy: 0.7764 - loss: 0.6921 
Epoch 9/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - accuracy: 0.6167 - loss: 0.6901 
Epoch 10/10
5/5 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - accuracy: 0.7931 - loss: 0.6894 
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 169ms/step
Sentiment: Positive
In [ ]:
 
In [ ]:
 
In [ ]:
'''To prioritize high-value listings, a real estate firm wants to predict whether a house falls
in the "High" or "Low" price category. Build a Random Forest classifier to perform binary
classification based on the house’s features.
In [ ]:
 
In [10]:
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix

# Load dataset
housing = fetch_california_housing(as_frame=True)
df = housing.frame

# Create binary target: 'High' if above median, else 'Low'
median_price = df['MedHouseVal'].median()
df['PriceCategory'] = df['MedHouseVal'].apply(lambda x: 1 if x > median_price else 0)

# Features and labels
X = df.drop(['MedHouseVal', 'PriceCategory'], axis=1)
y = df['PriceCategory']

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build Random Forest Classifier
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# Predictions
y_pred = model.predict(X_test)

# Evaluate
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))
print("\nClassification Report:\n", classification_report(y_test, y_pred, target_names=["Low", "High"]))
Confusion Matrix:
 [[1864  213]
 [ 227 1824]]

Classification Report:
               precision    recall  f1-score   support

         Low       0.89      0.90      0.89      2077
        High       0.90      0.89      0.89      2051

    accuracy                           0.89      4128
   macro avg       0.89      0.89      0.89      4128
weighted avg       0.89      0.89      0.89      4128

In [ ]:
 
In [ ]:
 
In [ ]:
'''To identify regional housing market segments, an analyst wants to group properties
based on similar features. Apply the K-Means clustering algorithm to segment the
housing dataset and visualize the clusters.
In [ ]:
 
In [11]:
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import seaborn as sns

# Load California housing dataset
housing = fetch_california_housing(as_frame=True)
df = housing.frame

# Select features for clustering (excluding target variables)
X = df.drop(['MedHouseVal'], axis=1)

# Scale the features for better clustering performance
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Apply KMeans clustering
kmeans = KMeans(n_clusters=5, random_state=42)  # You can adjust the number of clusters
kmeans.fit(X_scaled)

# Add cluster labels to the dataframe
df['Cluster'] = kmeans.labels_

# Visualize the clusters
plt.figure(figsize=(10, 6))
sns.scatterplot(x=df['AveRooms'], y=df['AveOccup'], hue=df['Cluster'], palette='viridis', s=100, alpha=0.6, edgecolor='w')
plt.title('K-Means Clustering: Housing Segments')
plt.xlabel('Average Rooms')
plt.ylabel('Average Occupants')
plt.legend(title='Cluster')
plt.show()
No description has been provided for this image
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]: